fabio-0.6.0/0000755001611600070440000000000013227375744013726 5ustar kiefferscisoft00000000000000fabio-0.6.0/pyproject.toml0000644001611600070440000000015213227357030016624 0ustar kiefferscisoft00000000000000[build-system] requires = [ "wheel", "setuptools", "numpy", "sphinx", "Cython>=0.25" ]fabio-0.6.0/MANIFEST.in0000644001611600070440000000143213227375626015463 0ustar kiefferscisoft00000000000000# Patterns to exclude from any directory global-exclude *~ global-exclude *.pyc global-exclude *.pyo global-exclude .git global-exclude .ipynb_checkpoints recursive-include package/debian? * recursive-include fabio/ext *.c *.h *.pyx recursive-exclude test/tiftest * recursive-exclude test/testimages * recursive-exclude testimages * recursive-exclude fabio.egg-info * recursive-exclude build * recursive-exclude dist * recursive-exclude pylint * include version.py include stdeb.cfg include setup.cfg exclude MANIFEST include MANIFEST.in include build-deb.sh include run_tests.py include bootstrap.py include README.rst include copyright include requirements.txt include pyproject.toml #Include doc without checkpoints recursive-include doc * recursive-exclude doc .ipynb_checkpoints/*.ipynb fabio-0.6.0/fabio.egg-info/0000755001611600070440000000000013227375744016500 5ustar kiefferscisoft00000000000000fabio-0.6.0/fabio.egg-info/SOURCES.txt0000644001611600070440000000766213227375744020377 0ustar kiefferscisoft00000000000000MANIFEST.in README.rst bootstrap.py build-deb.sh copyright pyproject.toml requirements.txt run_tests.py setup.py stdeb.cfg version.py doc/Makefile doc/make.bat doc/source/Changelog.rst doc/source/conf.py doc/source/coverage.rst doc/source/getting_started.rst doc/source/index.rst doc/source/install.rst doc/source/mathjax.py doc/source/performances.rst doc/source/api/modules.rst doc/source/img/viewer.png doc/source/man/fabio_viewer.rst doc/source/tutorials/Nexus2cbf.ipynb doc/source/tutorials/convert_CBF.ipynb doc/source/tutorials/index.rst fabio/GEimage.py 
fabio/HiPiCimage.py fabio/OXDimage.py fabio/TiffIO.py fabio/__init__.py fabio/adscimage.py fabio/binaryimage.py fabio/bruker100image.py fabio/brukerimage.py fabio/cbfimage.py fabio/compression.py fabio/converters.py fabio/datIO.py fabio/directories.py fabio/dm3image.py fabio/edfimage.py fabio/eigerimage.py fabio/fabioformats.py fabio/fabioimage.py fabio/fabioutils.py fabio/file_series.py fabio/fit2dimage.py fabio/fit2dmaskimage.py fabio/fit2dspreadsheetimage.py fabio/hdf5image.py fabio/jpeg2kimage.py fabio/jpegimage.py fabio/kcdimage.py fabio/mar345image.py fabio/marccdimage.py fabio/mpaimage.py fabio/mrcimage.py fabio/nexus.py fabio/numpyimage.py fabio/openimage.py fabio/pilatusimage.py fabio/pixiimage.py fabio/pnmimage.py fabio/raxisimage.py fabio/readbytestream.py fabio/setup.py fabio/speimage.py fabio/templateimage.py fabio/tifimage.py fabio/xsdimage.py fabio/app/__init__.py fabio/app/_matplotlib.py fabio/app/_qt.py fabio/app/convert.py fabio/app/setup.py fabio/app/viewer.py fabio/benchmark/__init__.py fabio/benchmark/setup.py fabio/ext/__init__.py fabio/ext/_cif.pyx fabio/ext/byte_offset.pyx fabio/ext/cf_io.pyx fabio/ext/mar345_IO.pyx fabio/ext/setup.py fabio/ext/include/ccp4_pack.h fabio/ext/include/columnfile.h fabio/ext/include/msvc/stdint.h fabio/ext/src/ccp4_pack.c fabio/ext/src/cf_iomodule.c fabio/ext/src/columnfile.c fabio/test/__init__.py fabio/test/profile_all.py fabio/test/setup.py fabio/test/testGEimage.py fabio/test/testOXDimage.py fabio/test/testXSDimage.py fabio/test/test_all.py fabio/test/test_all_images.py fabio/test/test_failing_files.py fabio/test/test_file_series.py fabio/test/test_filename_steps.py fabio/test/test_flat_binary.py fabio/test/test_formats.py fabio/test/test_nexus.py fabio/test/testadscimage.py fabio/test/testbruker100image.py fabio/test/testbrukerimage.py fabio/test/testcbfimage.py fabio/test/testcompression.py fabio/test/testdm3image.py fabio/test/testedfimage.py fabio/test/testeigerimage.py fabio/test/testfabioconvert.py 
fabio/test/testfabioimage.py fabio/test/testfilenames.py fabio/test/testfit2dimage.py fabio/test/testfit2dmaskimage.py fabio/test/testhdf5image.py fabio/test/testheadernotsingleton.py fabio/test/testjpeg2kimage.py fabio/test/testjpegimage.py fabio/test/testkcdimage.py fabio/test/testmar345image.py fabio/test/testmccdimage.py fabio/test/testmpaimage.py fabio/test/testnumpyimage.py fabio/test/testopenheader.py fabio/test/testopenimage.py fabio/test/testpilatusimage.py fabio/test/testpnmimage.py fabio/test/testraxisimage.py fabio/test/testspeimage.py fabio/test/testtifimage.py fabio/test/utilstest.py fabio/third_party/__init__.py fabio/third_party/argparse.py fabio/third_party/gzip.py fabio/third_party/ordereddict.py fabio/third_party/setup.py fabio/third_party/six.py fabio/third_party/_local/__init__.py fabio/third_party/_local/argparse.py fabio/third_party/_local/gzip.py fabio/third_party/_local/ordereddict.py fabio/third_party/_local/setup.py fabio/third_party/_local/six.py fabio/utils/__init__.py fabio/utils/mathutils.py fabio/utils/pilutils.py fabio/utils/setup.py package/debian8/changelog package/debian8/compat package/debian8/control package/debian8/python-fabio-doc.doc-base package/debian8/rules package/debian8/watch package/debian8/source/format package/debian9/changelog package/debian9/compat package/debian9/control package/debian9/python-fabio-doc.doc-base package/debian9/rules package/debian9/watch package/debian9/source/formatfabio-0.6.0/package/0000755001611600070440000000000013227375744015321 5ustar kiefferscisoft00000000000000fabio-0.6.0/package/debian8/0000755001611600070440000000000013227375744016633 5ustar kiefferscisoft00000000000000fabio-0.6.0/package/debian8/compat0000644001611600070440000000000213227357030020015 0ustar kiefferscisoft000000000000009 fabio-0.6.0/package/debian8/rules0000755001611600070440000000342113227357030017677 0ustar kiefferscisoft00000000000000#!/usr/bin/make -f export PYBUILD_NAME=fabio %: dh $@ --with 
python2,python3,sphinxdoc --buildsystem=pybuild override_dh_clean: dh_clean rm -f $(patsubst %.pyx,%.c,$(wildcard fabio/ext/*.pyx)) rm -rf build/html rm -rf *.egg-info override_dh_auto_build: dh_auto_build #PYBUILD_SYSTEM=custom \ #PYBUILD_BUILD_ARGS="PYTHONPATH={build_dir} http_proxy='localhost' sphinx-build -N -bhtml doc/source build/html" dh_auto_build python setup.py build build_man build_doc # unactive test for now override_dh_auto_test: PYBUILD_SYSTEM=custom \ PYBUILD_TEST_ARGS="PYTHONPATH={build_dir} FABIO_TESTIMAGES=testimages {interpreter} ./run_tests.py --installed" dh_auto_test override_dh_install: dh_numpy dh_numpy3 # move the scripts to right package dh_install -p fabio-bin debian/python-fabio/usr/bin usr/ # remove all scripts installed by pybuild rm -rf debian/python-fabio/usr/bin rm -rf debian/python-fabio-dbg/usr/bin rm -rf debian/python3-fabio/usr/bin rm -rf debian/python3-fabio-dbg/usr/bin # remove all py/pyc/egg-info files from dbg packages find debian/python-fabio-dbg/usr -name "*.py" -type f -delete find debian/python-fabio-dbg/usr -name "*.pyc" -type f -delete find debian/python-fabio-dbg/usr -path '*/*.egg-info/*' -delete find debian/python-fabio-dbg/usr -name "*.egg-info" -type d -empty -delete find debian/python3-fabio-dbg/usr -name "*.py" -type f -delete find debian/python3-fabio-dbg/usr -name "*.pyc" -type f -delete find debian/python3-fabio-dbg/usr -path '*/*.egg-info/*' -delete find debian/python3-fabio-dbg/usr -name "*.egg-info" -type d -empty -delete #Finally install stuff dh_install override_dh_installman: dh_installman -p fabio-bin build/man/*.1 override_dh_installdocs: dh_installdocs "build/sphinx/html" -p python-fabio-doc dh_installdocs fabio-0.6.0/package/debian8/python-fabio-doc.doc-base0000644001611600070440000000047413227357030023365 0ustar kiefferscisoft00000000000000Document: fabio-manual Title: Fabio documentation manual Author: Jérôme Kieffer Abstract: I/O library for images produced by 2D X-ray detector Section: 
Science/Data Analysis Format: HTML Index: /usr/share/doc/python-fabio-doc/html/index.html Files: /usr/share/doc/python-fabio-doc/html/* fabio-0.6.0/package/debian8/watch0000644001611600070440000000026713227357030017655 0ustar kiefferscisoft00000000000000version=3 opts=repacksuffix=+dfsg,\ uversionmangle=s/(rc|a|b|c)/~$1/,\ dversionmangle=s/\+dfsg// \ http://pypi.debian.net/fabio/fabio-(.+)\.(?:zip|tgz|tbz|txz|(?:tar\.(?:gz|bz2|xz))) fabio-0.6.0/package/debian8/source/0000755001611600070440000000000013227375744020133 5ustar kiefferscisoft00000000000000fabio-0.6.0/package/debian8/source/format0000644001611600070440000000001413227357030021325 0ustar kiefferscisoft000000000000003.0 (quilt) fabio-0.6.0/package/debian8/control0000644001611600070440000001345013227357030020225 0ustar kiefferscisoft00000000000000Source: python-fabio Maintainer: Debian Science Maintainers Uploaders: Jerome Kieffer , Picca Frédéric-Emmanuel Section: science Priority: extra Build-Depends: cython, cython-dbg, cython3, cython3-dbg, debhelper , dh-python, pymca, python-all-dev, python-all-dbg, python-imaging, python-imaging-dbg, python-lxml, python-lxml-dbg, python-numpy, python-numpy-dbg, python-setuptools, python-six, python-sphinx, python-sphinxcontrib.programoutput, python3-all-dev, python3-all-dbg, python3-lxml, python3-lxml-dbg, python3-numpy, python3-numpy-dbg, python3-pil, python3-pil-dbg, python3-setuptools, python3-six, python3-sphinx, python3-sphinxcontrib.programoutput, help2man Standards-Version: 3.9.6 Vcs-Browser: https://anonscm.debian.org/cgit/debian-science/packages/python-fabio.git Vcs-Git: git://anonscm.debian.org/debian-science/packages/python-fabio.git Homepage: https://github.com/silx-kit/fabio X-Python-Version: >= 2.5 X-Python3-Version: >= 3.2 Package: fabio-bin Architecture: all Section: python Depends: ${misc:Depends}, ${python:Depends}, python-fabio (>= ${source:Version}) Description: Binaries provided with python-fabio. . 
Contains a viewer and a converter for images produced by 2D X-ray detector. Package: python-fabio Architecture: any Section: python Depends: ${misc:Depends}, ${python:Depends}, ${shlibs:Depends}, pymca, python-six Recommends: fabio-bin, fabio-convert, python-imaging, python-lxml, python-matplotlib Suggests: pyfai, python-fabio-doc Description: I/O library for images produced by 2D X-ray detector - Python2 FabIO is an I/O library for images produced by 2D X-ray detectors and written in Python. FabIO support images detectors from a dozen of companies (including Mar, Dectris, ADSC, Hamamatsu, Oxford, ...), for a total of 20 different file formats (like CBF, EDF, TIFF, ...) and offers an unified interface to their headers (as a Python dictionary) and datasets (as a numpy ndarray of integers or floats) . This is the Python 2 version of the package. Package: python-fabio-dbg Architecture: any Section: debug Depends: ${misc:Depends}, ${python:Depends}, ${shlibs:Depends}, python-fabio (= ${binary:Version}) Recommends: python-dbg, python-imaging-dbg, python-lxml-dbg, python-matplotlib-dbg Description: I/O library for images produced by 2D X-ray detector - Python2 debug FabIO is an I/O library for images produced by 2D X-ray detectors and written in Python. FabIO support images detectors from a dozen of companies (including Mar, Dectris, ADSC, Hamamatsu, Oxford, ...), for a total of 20 different file formats (like CBF, EDF, TIFF, ...) and offers an unified interface to their headers (as a Python dictionary) and datasets (as a numpy ndarray of integers or floats) . This package contains the extension built for the Python 2 debug interpreter. 
Package: python3-fabio Architecture: any Section: python Depends: ${misc:Depends}, ${python3:Depends}, ${shlibs:Depends}, pymca, python3-six Recommends: python3-lxml, python3-matplotlib, python3-pil Suggests: pyfai, python-fabio-doc Description: I/O library for images produced by 2D X-ray detector - Python3 FabIO is an I/O library for images produced by 2D X-ray detectors and written in Python. FabIO support images detectors from a dozen of companies (including Mar, Dectris, ADSC, Hamamatsu, Oxford, ...), for a total of 20 different file formats (like CBF, EDF, TIFF, ...) and offers an unified interface to their headers (as a Python dictionary) and datasets (as a numpy ndarray of integers or floats) . This is the Python 3 version of the package. Package: python3-fabio-dbg Architecture: any Section: debug Depends: ${misc:Depends}, ${python3:Depends}, ${shlibs:Depends}, python3-fabio (= ${binary:Version}) Recommends: python3-dbg, python3-lxml-dbg, python3-matplotlib-dbg, python3-pil-dbg Description: I/O library for images produced by 2D X-ray detector - Python3 debug FabIO is an I/O library for images produced by 2D X-ray detectors and written in Python. FabIO support images detectors from a dozen of companies (including Mar, Dectris, ADSC, Hamamatsu, Oxford, ...), for a total of 20 different file formats (like CBF, EDF, TIFF, ...) and offers an unified interface to their headers (as a Python dictionary) and datasets (as a numpy ndarray of integers or floats) . This package contains the extension built for the Python 3 debug interpreter. Package: python-fabio-doc Architecture: all Section: doc Depends: ${misc:Depends}, ${sphinxdoc:Depends} Description: I/O library for images produced by 2D X-ray detector - documentation FabIO is an I/O library for images produced by 2D X-ray detectors and written in Python. 
FabIO support images detectors from a dozen of companies (including Mar, Dectris, ADSC, Hamamatsu, Oxford, ...), for a total of 20 different file formats (like CBF, EDF, TIFF, ...) and offers an unified interface to their headers (as a Python dictionary) and datasets (as a numpy ndarray of integers or floats) . This is the common documentation package. fabio-0.6.0/package/debian8/changelog0000644001611600070440000000742113227357030020475 0ustar kiefferscisoft00000000000000python-fabio (0.3.0b3-1~bpo8+2) jessie-backports; urgency=medium * Rebuild for jessie-backports. * fabio snapshot built for debian 8 -- Jerome Kieffer Thu, 16 Jun 2016 12:05:29 +0200 python-fabio (0.3.0b3-1~bpo8+1) jessie-backports; urgency=medium * Rebuild for jessie-backports. * fabio built for debian 8 -- Jerome Kieffer Thu, 03 Mar 2016 17:08:44 +0100 python-fabio (0.3.0b3-1) jessie-backports; urgency=low * Rebuild for jessie-backports. * new upstream/master 0.3.0 beta -- Jerome Kieffer Thu, 29 Oct 2015 16:02:36 +0100 python-fabio (0.2.2+dfsg-3~bpo8+1) jessie-backports; urgency=medium * Backport to stable. 
-- Picca Frédéric-Emmanuel Mon, 05 Oct 2015 14:44:48 +0200 python-fabio (0.2.2+dfsg-3) unstable; urgency=medium * debian/control - fix Dependency to deal with backports (0.2.2+dfsg-2 -> 0.2.2+dfsg-2~) - remove the circular dependency (Closes: #794153) (python-fabio only Recommends fabio-viewer) -- Picca Frédéric-Emmanuel Tue, 29 Sep 2015 10:20:19 +0200 python-fabio (0.2.2+dfsg-2) unstable; urgency=medium * Add python3 modules and put the scripts into fabio-viewer -- Picca Frédéric-Emmanuel Mon, 27 Jul 2015 10:03:05 +0200 python-fabio (0.2.2+dfsg-1) unstable; urgency=medium * Imported Upstream version 0.2.2+dfsg * Repack in order to exclude third_party modules * debian/control - Bump Standard-Versions to 3.9.6 (no change) - Add Build-Depends: python-six, pymca, python-sphinxcontrib.programoutput * debian/patchs - 0001-fix-the-build-system.patch (added) * debian/watch - use the PyPi redirector -- Picca Frédéric-Emmanuel Wed, 22 Jul 2015 16:13:10 +0200 python-fabio (0.1.4-1) unstable; urgency=medium * Imported Upstream version 0.1.4 (Closes: #735432, #693121) * Run all tests during the build (thanks to pybuild) * debian/control - Add Build-Depends: dh-python for pybuild * debian/compat - switch to compat level 9 * debian/copyright - updated for 0.1.4 version * debian/patchs - deleted (applyed by upstream) - 0001-forwarded-upstream-cythonize-during-the-build.patch - 0002-compat_2.5.patch * debian/rules - use the pybuild buildsystem * debian/watch - use the sourceforge redirector -- Picca Frédéric-Emmanuel Sat, 05 Apr 2014 09:24:15 +0200 python-fabio (0.1.3-3) unstable; urgency=medium * fix the FTBFS when sphinxdoc is not installed -- Picca Frédéric-Emmanuel Wed, 15 Jan 2014 19:32:16 +0100 python-fabio (0.1.3-2) unstable; urgency=medium [kieffer] * Fix Lintian remarks * Correct compatibility with Python 2.5 [picca] * debian/control - Bump Standard-Versions to 3.9.5 (no change) - Add the python-fabio-dbg package - Add the python-fabio-doc package - Team maintained under 
debian-science - Build-Depends: add cython * debian/patch + 0001-forwarded-upstream-cythonize-during-the-build.patch (new) -- Picca Frédéric-Emmanuel Fri, 27 Dec 2013 09:46:21 +0100 python-fabio (0.1.3-1) unstable; urgency=low [kieffer] * New upstream version (v0.1.3) [picca] * debian/control - use the right python-all-dev version dependencies for dh_python2. - remove had coded numpy dependencies which are generated by dh_numpy. - add myself as Uploaders. - update the homepage add the VCS informations. -- Picca Frédéric-Emmanuel Sat, 02 Nov 2013 21:40:48 +0100 python-fabio (0.0.8-1) unstable; urgency=low * Initial release (Closes: #649008) -- Jerome Kieffer Fri, 18 Nov 2011 16:19:20 +0100 fabio-0.6.0/package/debian9/0000755001611600070440000000000013227375744016634 5ustar kiefferscisoft00000000000000fabio-0.6.0/package/debian9/compat0000644001611600070440000000000213227357030020016 0ustar kiefferscisoft000000000000009 fabio-0.6.0/package/debian9/rules0000755001611600070440000000215413227357030017702 0ustar kiefferscisoft00000000000000#!/usr/bin/make -f export PYBUILD_NAME=fabio %: dh $@ --with python2,python3,sphinxdoc --buildsystem=pybuild override_dh_clean: dh_clean rm -f $(patsubst %.pyx,%.c,$(wildcard fabio/ext/*.pyx)) rm -rf build/html rm -rf *.egg-info override_dh_auto_build: dh_auto_build #PYBUILD_SYSTEM=custom \ #PYBUILD_BUILD_ARGS="PYTHONPATH={build_dir} http_proxy='localhost' sphinx-build -N -bhtml doc/source build/html" dh_auto_build python setup.py build build_man build_doc # unactive test for now override_dh_auto_test: PYBUILD_SYSTEM=custom \ PYBUILD_TEST_ARGS="PYTHONPATH={build_dir} FABIO_TESTIMAGES=testimages {interpreter} ./run_tests.py --installed" dh_auto_test override_dh_install: dh_numpy dh_numpy3 # move the scripts to right package dh_install -p fabio-bin debian/python-fabio/usr/bin usr/ # remove all scripts installed by pybuild rm -rf debian/python-fabio/usr/bin rm -rf debian/python3-fabio/usr/bin #Finally install stuff dh_install 
override_dh_installman: dh_installman -p fabio-bin build/man/*.1 override_dh_installdocs: dh_installdocs "build/sphinx/html" -p python-fabio-doc dh_installdocs fabio-0.6.0/package/debian9/python-fabio-doc.doc-base0000644001611600070440000000047413227357030023366 0ustar kiefferscisoft00000000000000Document: fabio-manual Title: Fabio documentation manual Author: Jérôme Kieffer Abstract: I/O library for images produced by 2D X-ray detector Section: Science/Data Analysis Format: HTML Index: /usr/share/doc/python-fabio-doc/html/index.html Files: /usr/share/doc/python-fabio-doc/html/* fabio-0.6.0/package/debian9/watch0000644001611600070440000000026713227357030017656 0ustar kiefferscisoft00000000000000version=3 opts=repacksuffix=+dfsg,\ uversionmangle=s/(rc|a|b|c)/~$1/,\ dversionmangle=s/\+dfsg// \ http://pypi.debian.net/fabio/fabio-(.+)\.(?:zip|tgz|tbz|txz|(?:tar\.(?:gz|bz2|xz))) fabio-0.6.0/package/debian9/source/0000755001611600070440000000000013227375744020134 5ustar kiefferscisoft00000000000000fabio-0.6.0/package/debian9/source/format0000644001611600070440000000001413227357030021326 0ustar kiefferscisoft000000000000003.0 (quilt) fabio-0.6.0/package/debian9/control0000644001611600070440000000754213227357030020233 0ustar kiefferscisoft00000000000000Source: python-fabio Maintainer: Debian Science Maintainers Uploaders: Jerome Kieffer , Picca Frédéric-Emmanuel Section: science Priority: extra Build-Depends: cython, cython-dbg, cython3, cython3-dbg, debhelper , dh-python, python-all-dev, python-imaging, python-lxml, python-numpy, python-setuptools, python-six, python-sphinx, python-sphinxcontrib.programoutput, python3-all-dev, python3-lxml, python3-numpy, python3-pil, python3-setuptools, python3-six, python3-sphinx, python3-sphinxcontrib.programoutput, python-h5py, python3-h5py, help2man Standards-Version: 3.9.6 Vcs-Browser: https://anonscm.debian.org/cgit/debian-science/packages/python-fabio.git Vcs-Git: git://anonscm.debian.org/debian-science/packages/python-fabio.git 
Homepage: https://github.com/silx-kit/fabio X-Python-Version: >= 2.5 X-Python3-Version: >= 3.2 Package: fabio-bin Architecture: all Section: python Depends: ${misc:Depends}, ${python:Depends}, python-fabio (>= ${source:Version}) Description: Binaries provided with python-fabio. . Contains a viewer and a converter for images produced by 2D X-ray detector. Package: python-fabio Architecture: any Section: python Depends: ${misc:Depends}, ${python:Depends}, ${shlibs:Depends}, python-six Recommends: fabio-bin, python-imaging, python-lxml, python-matplotlib, python-h5py Suggests: pyfai, python-fabio-doc Description: I/O library for images produced by 2D X-ray detector - Python2 FabIO is an I/O library for images produced by 2D X-ray detectors and written in Python. FabIO support images detectors from a dozen of companies (including Mar, Dectris, ADSC, Hamamatsu, Oxford, ...), for a total of 20 different file formats (like CBF, EDF, TIFF, ...) and offers an unified interface to their headers (as a Python dictionary) and datasets (as a numpy ndarray of integers or floats) . This is the Python 2 version of the package. Package: python3-fabio Architecture: any Section: python Depends: ${misc:Depends}, ${python3:Depends}, ${shlibs:Depends}, python3-six Recommends: python3-lxml, python3-matplotlib, python3-pil, python3-h5py Suggests: pyfai, python-fabio-doc Description: I/O library for images produced by 2D X-ray detector - Python3 FabIO is an I/O library for images produced by 2D X-ray detectors and written in Python. FabIO support images detectors from a dozen of companies (including Mar, Dectris, ADSC, Hamamatsu, Oxford, ...), for a total of 20 different file formats (like CBF, EDF, TIFF, ...) and offers an unified interface to their headers (as a Python dictionary) and datasets (as a numpy ndarray of integers or floats) . This is the Python 3 version of the package. 
Package: python-fabio-doc Architecture: all Section: doc Depends: ${misc:Depends}, ${sphinxdoc:Depends} Description: I/O library for images produced by 2D X-ray detector - documentation FabIO is an I/O library for images produced by 2D X-ray detectors and written in Python. FabIO support images detectors from a dozen of companies (including Mar, Dectris, ADSC, Hamamatsu, Oxford, ...), for a total of 20 different file formats (like CBF, EDF, TIFF, ...) and offers an unified interface to their headers (as a Python dictionary) and datasets (as a numpy ndarray of integers or floats) . This is the common documentation package. fabio-0.6.0/package/debian9/changelog0000644001611600070440000000742113227357030020476 0ustar kiefferscisoft00000000000000python-fabio (0.3.0b3-1~bpo8+2) jessie-backports; urgency=medium * Rebuild for jessie-backports. * fabio snapshot built for debian 8 -- Jerome Kieffer Thu, 16 Jun 2016 12:05:29 +0200 python-fabio (0.3.0b3-1~bpo8+1) jessie-backports; urgency=medium * Rebuild for jessie-backports. * fabio built for debian 8 -- Jerome Kieffer Thu, 03 Mar 2016 17:08:44 +0100 python-fabio (0.3.0b3-1) jessie-backports; urgency=low * Rebuild for jessie-backports. * new upstream/master 0.3.0 beta -- Jerome Kieffer Thu, 29 Oct 2015 16:02:36 +0100 python-fabio (0.2.2+dfsg-3~bpo8+1) jessie-backports; urgency=medium * Backport to stable. 
-- Picca Frédéric-Emmanuel Mon, 05 Oct 2015 14:44:48 +0200 python-fabio (0.2.2+dfsg-3) unstable; urgency=medium * debian/control - fix Dependency to deal with backports (0.2.2+dfsg-2 -> 0.2.2+dfsg-2~) - remove the circular dependency (Closes: #794153) (python-fabio only Recommends fabio-viewer) -- Picca Frédéric-Emmanuel Tue, 29 Sep 2015 10:20:19 +0200 python-fabio (0.2.2+dfsg-2) unstable; urgency=medium * Add python3 modules and put the scripts into fabio-viewer -- Picca Frédéric-Emmanuel Mon, 27 Jul 2015 10:03:05 +0200 python-fabio (0.2.2+dfsg-1) unstable; urgency=medium * Imported Upstream version 0.2.2+dfsg * Repack in order to exclude third_party modules * debian/control - Bump Standard-Versions to 3.9.6 (no change) - Add Build-Depends: python-six, pymca, python-sphinxcontrib.programoutput * debian/patchs - 0001-fix-the-build-system.patch (added) * debian/watch - use the PyPi redirector -- Picca Frédéric-Emmanuel Wed, 22 Jul 2015 16:13:10 +0200 python-fabio (0.1.4-1) unstable; urgency=medium * Imported Upstream version 0.1.4 (Closes: #735432, #693121) * Run all tests during the build (thanks to pybuild) * debian/control - Add Build-Depends: dh-python for pybuild * debian/compat - switch to compat level 9 * debian/copyright - updated for 0.1.4 version * debian/patchs - deleted (applyed by upstream) - 0001-forwarded-upstream-cythonize-during-the-build.patch - 0002-compat_2.5.patch * debian/rules - use the pybuild buildsystem * debian/watch - use the sourceforge redirector -- Picca Frédéric-Emmanuel Sat, 05 Apr 2014 09:24:15 +0200 python-fabio (0.1.3-3) unstable; urgency=medium * fix the FTBFS when sphinxdoc is not installed -- Picca Frédéric-Emmanuel Wed, 15 Jan 2014 19:32:16 +0100 python-fabio (0.1.3-2) unstable; urgency=medium [kieffer] * Fix Lintian remarks * Correct compatibility with Python 2.5 [picca] * debian/control - Bump Standard-Versions to 3.9.5 (no change) - Add the python-fabio-dbg package - Add the python-fabio-doc package - Team maintained under 
debian-science - Build-Depends: add cython * debian/patch + 0001-forwarded-upstream-cythonize-during-the-build.patch (new) -- Picca Frédéric-Emmanuel Fri, 27 Dec 2013 09:46:21 +0100 python-fabio (0.1.3-1) unstable; urgency=low [kieffer] * New upstream version (v0.1.3) [picca] * debian/control - use the right python-all-dev version dependencies for dh_python2. - remove had coded numpy dependencies which are generated by dh_numpy. - add myself as Uploaders. - update the homepage add the VCS informations. -- Picca Frédéric-Emmanuel Sat, 02 Nov 2013 21:40:48 +0100 python-fabio (0.0.8-1) unstable; urgency=low * Initial release (Closes: #649008) -- Jerome Kieffer Fri, 18 Nov 2011 16:19:20 +0100 fabio-0.6.0/fabio/0000755001611600070440000000000013227375744015006 5ustar kiefferscisoft00000000000000fabio-0.6.0/fabio/mrcimage.py0000644001611600070440000001464413227357030017141 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: FabIO X-ray image reader # # Copyright (C) 2010-2016 European Synchrotron Radiation Facility # Grenoble, France # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # """MRC image for FabIO Authors: Jerome Kieffer email: Jerome.Kieffer@terre-adelie.org Specifications from: http://ami.scripps.edu/software/mrctools/mrc_specification.php """ # Get ready for python3: from __future__ import with_statement, print_function __authors__ = ["Jérôme Kieffer"] __contact__ = "Jerome.Kieffer@terre-adelie.org" __license__ = "MIT" __copyright__ = "Jérôme Kieffer" __version__ = "29 Oct 2013" import logging import sys import numpy from .fabioimage import FabioImage from .fabioutils import previous_filename, next_filename logger = logging.getLogger(__name__) if sys.version_info < (3.0): bytes = str class MrcImage(FabioImage): """ FabIO image class for Images from a mrc image stack """ DESCRIPTION = "Medical Research Council file format for 3D electron density and 2D images" DEFAULT_EXTENSIONS = ["mrc"] KEYS = ("NX", "NY", "NZ", "MODE", "NXSTART", "NYSTART", "NZSTART", "MX", "MY", "MZ", "CELL_A", "CELL_B", "CELL_C", "CELL_ALPHA", "CELL_BETA", "CELL_GAMMA", "MAPC", "MAPR", "MAPS", "DMIN", "DMAX", "DMEAN", "ISPG", "NSYMBT", "EXTRA", "ORIGIN", "MAP", "MACHST", "RMS", "NLABL") def _readheader(self, infile): """ Read and decode the header of an image: :param infile: Opened python file (can be stringIO or bipped file) """ # list of header key to keep the order (when writing) self.header = self.check_header() # header is composed of 56-int32 plus 10x80char lines int_block = numpy.fromstring(infile.read(56 * 4), dtype=numpy.int32) for key, value in zip(self.KEYS, int_block): self.header[key] = value assert self.header["MAP"] == 542130509 # "MAP " in int32 ! 
for i in range(10): label = "LABEL_%02i" % i self.header[label] = infile.read(80).strip() self.dim1 = self.header["NX"] self.dim2 = self.header["NY"] self.nframes = self.header["NZ"] mode = self.header["MODE"] if mode == 0: self.bytecode = numpy.int8 elif mode == 1: self.bytecode = numpy.int16 elif mode == 2: self.bytecode = numpy.float32 elif mode == 3: self.bytecode = numpy.complex64 elif mode == 4: self.bytecode = numpy.complex64 elif mode == 6: self.bytecode = numpy.uint16 self.imagesize = self.dim1 * self.dim2 * numpy.dtype(self.bytecode).itemsize def read(self, fname, frame=None): """ try to read image :param fname: name of the file :param frame: """ self.resetvals() self.sequencefilename = fname self.currentframe = frame or 0 with self._open(fname) as infile: self._readheader(infile) self._readframe(infile, self.currentframe) return self def _calc_offset(self, frame): """ Calculate the frame position in the file :param frame: frame number """ assert frame < self.nframes return 1024 + frame * self.imagesize def _makeframename(self): self.filename = "%s$%04d" % (self.sequencefilename, self.currentframe) def _readframe(self, infile, img_num): """ Read a frame an populate data :param infile: opened file :param img_num: frame number (int) """ if (img_num > self.nframes or img_num < 0): raise RuntimeError("Requested frame number is out of range") _imgstart = self.header['offset'] + img_num * (512 * 476 * 2 + 24) infile.seek(self.calc_offset(img_num), 0) self.data = numpy.fromstring(infile.read(self.imagesize), self.bytecode) self.data.shape = self.dim2, self.dim1 self.currentframe = int(img_num) self._makeframename() def getframe(self, num): """ Returns a frame as a new FabioImage object :param num: frame number """ if num < 0 or num > self.nframes: raise RuntimeError("Requested frame number is out of range") # Do a deep copy of the header to make a new one frame = MrcImage(header=self.header.copy()) for key in ("dim1", "dim2", "nframes", "bytecode", "imagesize", 
"sequencefilename"): frame.__setattr__(key, self.__getattribute__(key)) with frame._open(self.sequencefilename, "rb") as infile: frame._readframe(infile, num) return frame def next(self): """ Get the next image in a series as a fabio image """ if self.currentframe < (self.nframes - 1) and self.nframes > 1: return self.getframe(self.currentframe + 1) else: newobj = MrcImage() newobj.read(next_filename(self.sequencefilename)) return newobj def previous(self): """ Get the previous image in a series as a fabio image """ if self.currentframe > 0: return self.getframe(self.currentframe - 1) else: newobj = MrcImage() newobj.read(previous_filename( self.sequencefilename)) return newobj mrcimage = MrcImage fabio-0.6.0/fabio/numpyimage.py0000644001611600070440000001612013227357030017517 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: FabIO X-ray image reader # # Copyright (C) 2010-2016 European Synchrotron Radiation Facility # Grenoble, France # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # """Generic numpy file reader for FabIO""" # Get ready for python3: from __future__ import with_statement, print_function, division __authors__ = ["Jérôme Kieffer"] __contact__ = "jerome.kieffer@esrf.fr" __license__ = "MIT" __copyright__ = "ESRF" __date__ = "27/07/2017" import logging logger = logging.getLogger(__name__) import numpy from .fabioimage import FabioImage class NumpyImage(FabioImage): """ FabIO image class for Images for numpy array dumps Source: http://docs.scipy.org/doc/numpy/neps/npy-format.html Format Specification: Version 1.0 ================================= The first 6 bytes are a magic string: exactly “x93NUMPY”. The next 1 byte is an unsigned byte: the major version number of the file format, e.g. x01. The next 1 byte is an unsigned byte: the minor version number of the file format, e.g. x00. Note: the version of the file format is not tied to the version of the numpy package. The next 2 bytes form a little-endian unsigned short int: the length of the header data HEADER_LEN. The next HEADER_LEN bytes form the header data describing the array’s format. It is an ASCII string which contains a Python literal expression of a dictionary. It is terminated by a newline (‘n’) and padded with spaces (‘x20’) to make the total length of the magic string + 4 + HEADER_LEN be evenly divisible by 16 for alignment purposes. The dictionary contains three keys: “descr” : dtype.descr An object that can be passed as an argument to the numpy.dtype() constructor to create the array’s dtype. “fortran_order” : bool Whether the array data is Fortran-contiguous or not. Since Fortran-contiguous arrays are a common form of non-C-contiguity, we allow them to be written directly to disk for efficiency. 
“shape” : tuple of int The shape of the array. For repeatability and readability, this dictionary is formatted using pprint.pformat() so the keys are in alphabetic order. Following the header comes the array data. If the dtype contains Python objects (i.e. dtype.hasobject is True), then the data is a Python pickle of the array. Otherwise the data is the contiguous (either C- or Fortran-, depending on fortran_order) bytes of the array. Consumers can figure out the number of bytes by multiplying the number of elements given by the shape (noting that shape=() means there is 1 element) by dtype.itemsize. Format Specification: Version 2.0 ================================= The version 1.0 format only allowed the array header to have a total size of 65535 bytes. This can be exceeded by structured arrays with a large number of columns. The version 2.0 format extends the header size to 4 GiB. numpy.save will automatically save in 2.0 format if the data requires it, else it will always use the more compatible 1.0 format. The description of the fourth element of the header therefore has become: The next 4 bytes form a little-endian unsigned int: the length of the header data HEADER_LEN. 
""" DESCRIPTION = "Numpy array file format" DEFAULT_EXTENSIONS = ["npy"] def __init__(self, data=None, header=None): """ Set up initial values """ FabioImage.__init__(self, data, header) self.dataset = self.data self.slice_dataset() self.filename = "Numpy_array_%x" % id(self.dataset) def slice_dataset(self, frame=None): if self.dataset is None: return if self.dataset.ndim > 3: shape = self.dataset.shape[-2:] self.dataset.shape = (-1,) + shape elif self.dataset.ndim < 2: self.dataset.shape = 1, -1 if self.dataset.ndim == 2: self.data = self.dataset elif self.dataset.ndim == 3: self.nframes = self.dataset.shape[0] if frame is None: frame = 0 if frame < self.nframes: self.data = self.dataset[frame] self.currentframe = frame def _readheader(self, infile): """ Read and decode the header of an image: :param infile: Opened python file (can be stringIO or bzipped file) """ # list of header key to keep the order (when writing) self.header = self.check_header() infile.seek(0) def read(self, fname, frame=None): """ Try to read image :param fname: name of the file """ self.resetvals() infile = self._open(fname) self._readheader(infile) # read the image data self.dataset = numpy.load(infile) self.slice_dataset(frame) return self def write(self, fname): """ Try to write image :param fname: name of the file """ numpy.save(fname, self.dataset) def getframe(self, num): """ returns the frame numbered 'num' in the stack if applicable""" if self.nframes > 1: new_img = None if (num >= 0) and num < self.nframes: data = self.dataset[num] new_img = self.__class__(data=data, header=self.header) new_img.dataset = self.dataset new_img.nframes = self.nframes new_img.currentframe = num else: raise IndexError("getframe %s out of range [%s %s[" % (num, 0, self.nframes)) else: new_img = FabioImage.getframe(self, num) return new_img def previous(self): """ returns the previous frame in the series as a fabioimage """ return self.getframe(self.currentframe - 1) def next(self): """ returns the next 
frame in the series as a fabioimage """ return self.getframe(self.currentframe + 1) numpyimage = NumpyImage fabio-0.6.0/fabio/templateimage.py0000644001611600070440000001070013227357030020160 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. """Template for FabIO image reader This is a template for adding new file formats to FabIO We hope it will be relatively easy to add new file formats to fabio in the future. The basic idea is the following: 1) inherit from FabioImage overriding the methods _readheader, read and optionally write. Name your new module XXXimage where XXX means something (eg tifimage). 2) readheader fills in a dictionary of "name":"value" pairs in self.header. No one expects to find anything much in there. 
3) read fills in self.data with a numpy array holding the image. Some info are automatically exposed from data: * self.dim1 and self.dim2: the image dimensions, * self.bpp is the bytes per pixel * self.bytecode is the numpy.dtype.type of the data. 4) The member variables "_need_a_seek_to_read" and "_need_a_real_file" are there in case you have trouble with the transparent handling of bz2 and gz files. 5) Add your new module as an import into fabio.fabioformats. Your class will be registered automatically. 6) Fill out the magic numbers for your format in fabio.openimage if you know them (the characteristic first few bytes in the file) 7) Upload a testimage to the file release system and create a unittest testcase which opens an example of your new format, confirming the image has actually been read in successfully (eg check the mean, max, min and esd are all correct, perhaps orientation too) 8) Run pylint on your code and then please go clean it up. Have a go at mine while you are at it, before requesting a pull-request on github. 
9) Bask in the warm glow of appreciation when someone unexpectedly learns they don't need to convert their data into another format """ # Get ready for python3: from __future__ import with_statement, print_function, division __authors__ = ["author"] __contact__ = "name@institut.org" __license__ = "MIT" __copyright__ = "Institut" __date__ = "22/08/2017" import logging logger = logging.getLogger(__name__) import numpy from .fabioimage import FabioImage, OrderedDict class TemplateImage(FabioImage): """FabIO image class for Images for XXX detector Put some documentation here """ DESCRIPTION = "Name of the file format" DEFAULT_EXTENSIONS = [] def __init__(self, *arg, **kwargs): """ Generic constructor """ FabioImage.__init__(self, *arg, **kwargs) def _readheader(self, infile): """ Read and decode the header of an image: :param infile: Opened python file (can be stringIO or bipped file) """ # list of header key to keep the order (when writing) self.header = self.check_header() def read(self, fname, frame=None): """ Try to read image :param fname: name of the file :param frame: number of the frame """ self.resetvals() with self._open(fname) as infile: self._readheader(infile) # read the image data and declare it shape = (50, 60) self.data = numpy.zeros(shape, dtype=self.uint16) # Nota: dim1, dim2, bytecode and bpp are properties defined by the dataset return self # This is not compatibility with old code: templateimage = TemplateImage fabio-0.6.0/fabio/dm3image.py0000644001611600070440000002530213227357030017034 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # 
#    including without limitation the rights to use, copy, modify, merge,
#    publish, distribute, sublicense, and/or sell copies of the Software,
#    and to permit persons to whom the Software is furnished to do so,
#    subject to the following conditions:
#
#    The above copyright notice and this permission notice shall be
#    included in all copies or substantial portions of the Software.
#
#    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#    OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#    HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
#    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#    FROM, OUT OF OR IN CONNECTION W

"""
Authors: Henning O. Sorensen & Erik Knudsen
         Center for Fundamental Research: Metal Structures in Four Dimensions
         Risoe National Laboratory
         Frederiksborgvej 399
         DK-4000 Roskilde
         email:erik.knudsen@risoe.dk

        + Jon Wright, ESRF
"""
# get ready for python3
from __future__ import with_statement, print_function

import sys
import logging
import numpy

from .fabioimage import FabioImage
from .third_party.six import binary_type

logger = logging.getLogger(__name__)

# DM3 "encoded type" id -> numpy dtype used to decode simple values.
# NOTE(review): numpy.float (entry 7) is a deprecated alias of the builtin
# float / numpy.float64 — candidate for replacement in a behavioural change.
DATA_TYPES = {2: numpy.int16,
              4: numpy.uint16,
              3: numpy.int32,
              5: numpy.uint32,
              6: numpy.float32,
              7: numpy.float,
              8: numpy.int8,
              9: None,
              10: None,
              15: 'Struct',
              18: None,
              20: None
              }

# DM3 "encoded type" id -> size in bytes of one element of that type.
DATA_BYTES = {2: 2,
              4: 2,
              3: 4,
              5: 4,
              6: 4,
              7: 8,
              8: 1,
              9: None,
              10: None,
              15: 'Struct',
              18: None,
              20: None
              }


class Dm3Image(FabioImage):
    """ Read and try to write the dm3 data format """

    DESCRIPTION = "Digital Micrograph DM3 file format"

    DEFAULT_EXTENSIONS = ["dm3"]

    def __init__(self, *args, **kwargs):
        # Parser state shared between the read_tag_* methods; all reset to
        # None until read() walks the tag tree.
        FabioImage.__init__(self, *args, **kwargs)
        self.encoded_datatype = None
        self.no_data_elements = None
        self.grouptag_is_sorted = None
        self.grouptag_is_open = None
        self.tag_encoded_type = None
        self.tag_data_type = None
        self.tag_is_data = None
        self.grouptag_no_tags = None
        self.bytes_in_file = None
        self.tag_label_length = None
        self.go_on = None

    def _readheader(self):
        """Decode the 12-byte DM3 file header (format, size, byte order)
        from self.infile and set self.swap accordingly."""
        self.infile.seek(0)
        file_format = self.readbytes(4, numpy.uint32)[0]  # should be 3
        assert file_format == 3, 'Wrong file type '
        self.bytes_in_file = self.readbytes(4, numpy.uint32)[0]
        self.byte_order = self.readbytes(4, numpy.uint32)[0]  # 0 = big, 1= little
        logger.info('read dm3 file - file format %s' % file_format)
        logger.info('Bytes in file: %s' % self.bytes_in_file)
        logger.info('Byte order: %s  - 0 = bigEndian , 1 = littleEndian' % self.byte_order)
        if self.byte_order == 0:
            self.swap = True
        elif self.byte_order == 1:
            self.swap = False
        else:
            raise ValueError

    def read(self, fname, frame=None):
        """Read a DM3 file: walk the tag tree into self.header, then derive
        the image dimensions and expose the 'Data' tag as self.data.

        :param fname: name of the file
        :param frame: unused (single-frame format)
        """
        self.header = self.check_header()
        self.resetvals()
        self.infile = self._open(fname, "rb")
        self._readheader()
        self.go_on = True
        while self.go_on:
            self.read_tag_group()
            self.read_tag_entry()
            if self.infile.tell() > self.bytes_in_file:
                break
            while self.tag_is_data == 21:
                self.read_tag_entry()
                if self.infile.tell() > self.bytes_in_file:
                    self.go_on = False
        # NOTE(review): eval() on strings taken from the file content —
        # int() would be safer for untrusted input; left unchanged here.
        (dim1_raw, dim2_raw) = self.header['Active Size (pixels)'].split()
        (dim1_raw, dim2_raw) = (eval(dim1_raw), eval(dim2_raw))
        (dim1_binning, dim2_binning) = self.header['Binning'].split()
        (dim1_binning, dim2_binning) = (eval(dim1_binning), eval(dim2_binning))
        self.dim1 = dim1_raw // dim1_binning
        self.dim2 = dim2_raw // dim2_binning
        # print dim1,dim2
        if "Data" in self.header:
            self.data = self.header[u'Data'].reshape(self.dim1, self.dim2)
        return self

    def readbytes(self, bytes_to_read, format, swap=True):
        """Read bytes_to_read bytes from self.infile; decode them as numpy
        dtype 'format' (byteswapped when swap is true), or return the raw
        bytes when format is None."""
        raw = self.infile.read(bytes_to_read)
        if format is not None:
            data = numpy.fromstring(raw, format)
        else:
            data = raw
        if swap:
            data = data.byteswap()
        return data

    def read_tag_group(self):
        """Read a TagGroup header: sorted flag, open flag, number of tags."""
        self.grouptag_is_sorted = self.readbytes(1, numpy.uint8)[0]
        self.grouptag_is_open = self.readbytes(1, numpy.uint8)[0]
        self.grouptag_no_tags = self.readbytes(4, numpy.uint32)[0]
        logger.debug('TagGroup is sorted? %s', self.grouptag_is_sorted)
        logger.debug('TagGroup is open? %s', self.grouptag_is_open)
        logger.debug('no of tags in TagGroup %s', self.grouptag_no_tags)

    def read_tag_entry(self):
        """Read one tag entry; when it carries data (type 21), decode the
        value via read_tag_type() and store it in self.header."""
        self.tag_is_data = self.readbytes(1, numpy.uint8)[0]
        self.tag_label_length = self.readbytes(2, numpy.uint16)[0]
        logger.debug('does Tag have data ? %s  - 20 = Tag group , 21 = data ', self.tag_is_data)
        logger.debug('length of tag_label %s', self.tag_label_length)
        if self.tag_label_length != 0:
            tag_label = self.infile.read(self.tag_label_length)
        else:
            tag_label = None
        if self.tag_is_data == 21:
            # This is data
            # NOTE(review): bare except silently maps undecodable labels to
            # "None"; except Exception would be the narrower form.
            try:
                key = tag_label.decode("latin-1")
            except:
                key = "None"
            value = self.read_tag_type()
            if isinstance(value, binary_type):
                value = value.decode()
            if key == "None":
                logger.info("%s: %s", key, value)
            else:
                logger.debug("%s: %s", key, value)
            self.header[key] = value

    def read_tag_type(self):
        """Decode the value of a data tag: a simple scalar, a simple array,
        a skipped complex array, or a struct flattened to bytes.

        :return: decoded value, or None for skipped/unsupported payloads
        :raises IOError: if the '%%%%' tag-type marker is missing
        """
        if self.infile.read(4) != b'%%%%':
            raise IOError
        self.tag_data_type = self.readbytes(4, numpy.uint32)[0]
        logger.debug('data is of type: %s  - 1 = simple, 2 = string, 3 = array, >3 structs.', self.tag_data_type)
        self.tag_encoded_type = self.readbytes(4, numpy.uint32)[0]
        logger.debug('encode type: %s %s', self.tag_encoded_type, DATA_TYPES[self.tag_encoded_type])
        if self.tag_data_type == 1:
            # simple type
            return self.readbytes(DATA_BYTES[self.tag_encoded_type],
                                  DATA_TYPES[self.tag_encoded_type],
                                  swap=self.swap)[0]
        # are the data stored in a simple array?
        if self.tag_encoded_type == 20 and self.tag_data_type == 3:
            self.data_type = self.readbytes(4, numpy.uint32)[0]
            self.no_data_elements = self.readbytes(4, numpy.uint32)[0]
            if self.data_type == 10:
                # data_type 10 payloads are skipped, one byte per element
                logger.debug('skip bytes %s', self.no_data_elements)
                _dump = self.infile.read(self.no_data_elements)
                return None
            logger.debug('Data are stored as a simple a array -')
            logger.debug('%s data elements stored as %s', self.no_data_elements, self.data_type)
            read_no_bytes = DATA_BYTES[self.data_type] * self.no_data_elements
            fmt = DATA_TYPES[self.data_type]
            return self.readbytes(read_no_bytes, fmt, swap=self.swap)
        # are the data stored in a complex array ?
        # print 'tag_type + data_type', self.tag_encoded_type,self.tag_data_type
        # print self.tag_encoded_type , self.tag_data_type
        if self.tag_encoded_type == 20 and self.tag_data_type > 3:
            self.tag_encoded_type = self.readbytes(4, numpy.uint32)[0]
            logger.debug('found array - new tag_encoded_type %s', self.tag_encoded_type)
            if self.tag_encoded_type == 15:
                # struct type: read field layout, then skip the whole payload
                # ##type = self.readbytes(4,numpy.int32)
                _struct_name_length = self.readbytes(4, numpy.int32)[0]
                struct_number_fields = self.readbytes(4, numpy.int32)[0]
                # print 'struct - name_length, number_field', struct_name_length,struct_number_fields
                # print self.infile.read(_struct_name_length)
                field_info = []
                for i in range(struct_number_fields):
                    field_info.append([self.readbytes(4, numpy.int32)[0], self.readbytes(4, numpy.int32)[0]])
                # print field_info
                self.no_data_elements = self.readbytes(4, numpy.int32)[0]
                # print '%i data elemets stored as ' %self.no_data_elements
                bytes_in_struct = 0
                for i in range(struct_number_fields):
                    bytes_in_struct += DATA_BYTES[field_info[i][1]]
                logger.debug('skip bytes %s', self.no_data_elements * bytes_in_struct)
                _dump = self.infile.read(self.no_data_elements * bytes_in_struct)
                return None
        if self.tag_encoded_type == 15:
            # struct type: decode field names and values into a byte string
            # ##type = self.readbytes(4,numpy.int32)
            _struct_name_length = self.readbytes(4, numpy.int32)[0]
            struct_number_fields = self.readbytes(4, numpy.int32)[0]
            # print 'struct - name_length, number_field', _struct_name_length,struct_number_fields
            # print self.infile.read(struct_name_length)
            field_info = []
            for i in range(struct_number_fields):
                field_info.append([self.readbytes(4, numpy.int32)[0], self.readbytes(4, numpy.int32)[0]])
            # print field_info
            field_data = b''
            for i in range(struct_number_fields):
                field_data += self.readbytes(field_info[i][0], None, swap=False) + b' '
                data = self.readbytes(DATA_BYTES[field_info[i][1]],
                                      DATA_TYPES[field_info[i][1]], swap=self.swap)
                field_data += str(data[0]).encode() + b" "
            return field_data

    def read_data(self):
        # NOTE(review): appears unused by read(); reads 4 bytes as a
        # byteswapped uint32 — confirm against callers before removing.
        self.encoded_datatype = numpy.fromstring(self.infile.read(4), numpy.uint32).byteswap()


dm3image = Dm3Image
fabio-0.6.0/fabio/eigerimage.py0000644001611600070440000001553313227357030017451 0ustar  kiefferscisoft00000000000000# coding: utf-8
#
#    Project: FabIO X-ray image reader
#
#    Copyright (C) 2010-2016 European Synchrotron Radiation Facility
#                       Grenoble, France
#
#    Permission is hereby granted, free of charge, to any person obtaining a copy
#    of this software and associated documentation files (the "Software"), to deal
#    in the Software without restriction, including without limitation the rights
#    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#    copies of the Software, and to permit persons to whom the Software is
#    furnished to do so, subject to the following conditions:
#
#    The above copyright notice and this permission notice shall be included in
#    all copies or substantial portions of the Software.
#
#    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#    IN NO EVENT SHALL THE
#    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#    THE SOFTWARE.
#
"""Eiger data/master file reader for FabIO

Eiger data files are HDF5 files with one group called "entry" and a dataset
called "data" in it (now in a data group).

Those dataset are usually compressed using LZ4 and/or bitshuffle compression:

* https://github.com/nexusformat/HDF5-External-Filter-Plugins/tree/master/LZ4
* https://github.com/kiyo-masui/bitshuffle

H5py (>2.5) and libhdf5 (>1.8.10) with the corresponding compression plugin
are needed to actually read the data.
Under windows, those plugins can easily be installed via this repository:
https://github.com/silx-kit/hdf5plugin
"""
# Get ready for python3:
from __future__ import with_statement, print_function, division

__authors__ = ["Jérôme Kieffer"]
__contact__ = "jerome.kieffer@esrf.fr"
__license__ = "MIT"
__copyright__ = "ESRF"
__date__ = "25/07/2017"

import logging
logger = logging.getLogger(__name__)

# h5py is optional at import time; its absence is reported lazily in __init__.
try:
    import h5py
except ImportError:
    h5py = None

from .fabioimage import FabioImage
from .fabioutils import NotGoodReader


class EigerImage(FabioImage):
    """
    FabIO image class for Images from Eiger data files (HDF5)
    """

    DESCRIPTION = "Eiger data files based on HDF5"

    DEFAULT_EXTENSIONS = ["h5"]

    def __init__(self, data=None, header=None):
        """
        Set up initial values
        """
        if not h5py:
            raise RuntimeError("fabio.EigerImage cannot be used without h5py. Please install h5py and restart")
        FabioImage.__init__(self, data, header)
        # After read(), self.dataset is a list of h5py datasets; here it is
        # initialised as a one-element list wrapping the in-memory data.
        self.dataset = [data]
        self.h5 = None

    def __repr__(self):
        if self.h5 is not None:
            return "Eiger dataset with %i frames from %s" % (self.nframes, self.h5.filename)
        else:
            return "%s object at %s" % (self.__class__.__name__, hex(id(self)))

    def _readheader(self, infile):
        """
        Read and decode the header of an image:

        :param infile: Opened python file (can be stringIO or bzipped file)
        """
        # list of header key to keep the order (when writing)
        self.header = self.check_header()
        infile.seek(0)

    def read(self, fname, frame=None):
        """
        try to read image

        :param fname: name of the file
        :param frame: frame number to expose (default: first frame)
        """
        self.resetvals()
        with self._open(fname) as infile:
            self._readheader(infile)
        self.dataset = None
        lstds = []
        # read the image data: the HDF5 file stays open for lazy frame access
        self.h5 = h5py.File(fname, mode="r")
        if "entry" in self.h5:
            entry = self.h5["entry"]
            if "data" in entry:
                data = entry["data"]
                if isinstance(data, h5py.Group):
                    "Newer format /entry/data/data_1"
                    datasets = [i for i in data.keys() if i.startswith("data")]
                    datasets.sort()
                    try:
                        for i in datasets:
                            lstds.append(data[i])
                    except KeyError:
                        pass
                else:
                    lstds = [data]
            else:
                "elder format entry/data_01"
                datasets = [i for i in entry.keys() if i.startswith("data")]
                datasets.sort()
                try:
                    for i in datasets:
                        lstds.append(entry[i])
                except KeyError:
                    pass
        if not lstds:
            raise NotGoodReader("HDF5 file does not contain an Eiger-like structure.")
        self.dataset = lstds
        # total frame count across all data_* datasets
        self.nframes = sum(i.shape[0] for i in lstds)
        self._dim1 = self.dataset[0].shape[-1]
        self._dim2 = self.dataset[0].shape[-2]
        if frame is not None:
            return self.getframe(int(frame))
        else:
            self.currentframe = 0
            self.data = self.dataset[0][self.currentframe, :, :]
            return self

    def write(self, fname):
        """
        try to write image

        :param fname: name of the file

        NOTE(review): this accesses self.dataset.shape although read() sets
        self.dataset to a *list* of datasets — as written it only works when
        self.dataset is a single array; confirm intended usage.
        """
        if len(self.dataset.shape) == 2:
            self.dataset.shape = (1,) + self.dataset.shape
        with h5py.File(fname) as h5file:
            grp = h5file.require_group("entry/data")
            if len(self.dataset) > 1:
                for i, ds in enumerate(self.dataset):
                    grp["data_%06i" % i] = ds
            else:
                grp["data"] = self.dataset

    def getframe(self, num):
        """ returns the frame numbered 'num' in the stack if applicable"""
        if self.nframes > 1:
            new_img = None
            if (num >= 0) and num < self.nframes:
                if isinstance(self.dataset, list):
                    # locate the dataset holding global frame index num
                    nfr = num
                    for ds in self.dataset:
                        if nfr < ds.shape[0]:
                            data = ds[nfr]
                            break
                        else:
                            nfr -= ds.shape[0]
                else:
                    data = self.dataset[num]
                new_img = self.__class__(data=data, header=self.header)
                new_img.dataset = self.dataset
                new_img.h5 = self.h5
                new_img.nframes = self.nframes
                new_img.currentframe = num
            else:
                raise IOError("getframe %s out of range [%s %s[" % (num, 0, self.nframes))
        else:
            new_img = FabioImage.getframe(self, num)
        return new_img

    def previous(self):
        """ returns the previous frame in the series as a fabioimage """
        return self.getframe(self.currentframe - 1)

    def next(self):
        """ returns the next frame in the series as a fabioimage """
        return self.getframe(self.currentframe + 1)


eigerimage = EigerImage
fabio-0.6.0/fabio/raxisimage.py0000644001611600070440000003102613227357030017477 0ustar  kiefferscisoft00000000000000# coding: utf-8
#
#    Project: X-ray image reader
#             https://github.com/silx-kit/fabio
#
#    Principal author: "Brian R. Pauw" "brian@stack.nl"
#
#    Permission is hereby granted, free of charge, to any person obtaining a copy
#    of this software and associated documentation files (the "Software"), to deal
#    in the Software without restriction, including without limitation the rights
#    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#    copies of the Software, and to permit persons to whom the Software is
#    furnished to do so, subject to the following conditions:
#
#    The above copyright notice and this permission notice shall be included in
#    all copies or substantial portions of the Software.
#
#    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#    THE SOFTWARE

"""
Authors: Brian R. Pauw
email:  brian@stack.nl

Written using information gleaned from the ReadRAXISImage program
written by T. L. Hendrixson, made available by Rigaku Americas.
Available at: http://www.rigaku.com/downloads/software/readimage.html
"""
# Get ready for python3:
from __future__ import with_statement, print_function, division

__authors__ = ["Brian R. Pauw"]
__contact__ = "brian@stack.nl"
__license__ = "MIT"
__copyright__ = "Brian R. Pauw"
__date__ = "27/07/2017"

import logging
import struct
import os
import numpy
from .fabioimage import FabioImage
from .fabioutils import OrderedDict

logger = logging.getLogger(__name__)

# Fixed layout of the (at least) 1400-byte RAXIS header: key -> either an
# int (a char field of that many bytes; -1 = remainder of the header),
# 'float' (4-byte float) or 'long' (4-byte integer).  Order matters: fields
# are read sequentially by _readheader().
RIGAKU_KEYS = OrderedDict([
    ('InstrumentType', 10),
    ('Version', 10),
    ('Crystal Name', 20),
    ('Crystal System', 12),
    ('A', 'float'),
    ('B', 'float'),
    ('C', 'float'),
    ('Alpha', 'float'),
    ('Beta', 'float'),
    ('Gamma', 'float'),
    ('Space Group', 12),
    ('Mosaicity', 'float'),
    ('Memo', 80),
    ('Date', 12),
    ('Reserved Space 1', 84),
    ('User', 20),
    ('Xray Target', 4),
    ('Wavelength', 'float'),
    ('Monochromator', 20),
    ('Monochromator 2theta', 'float'),
    ('Collimator', 20),
    ('Filter', 4),
    ('Crystal-to-detector Distance', 'float'),
    ('Generator Voltage', 'float'),
    ('Generator Current', 'float'),
    ('Focus', 12),
    ('Xray Memo', 80),
    ('IP shape', 'long'),  # 1= cylindrical, 0=flat. A "long" is overkill.
    ('Oscillation Type', 'float'),  # 1=weissenberg. else regular. "float"? really?
    ('Reserved Space 2', 56),
    ('Crystal Mount (spindle axis)', 4),
    ('Crystal Mount (beam axis)', 4),
    ('Phi Datum', 'float'),  # degrees
    ('Phi Oscillation Start', 'float'),  # deg
    ('Phi Oscillation Stop', 'float'),  # deg
    ('Frame Number', 'long'),
    ('Exposure Time', 'float'),  # minutes
    ('Direct beam X position', 'float'),  # special, x,y
    ('Direct beam Y position', 'float'),  # special, x,y
    ('Omega Angle', 'float'),  # omega angle
    ('Chi Angle', 'float'),  # omega angle
    ('2Theta Angle', 'float'),  # omega angle
    ('Mu Angle', 'float'),  # omega angle
    ('Image Template', 204),  # used for storing scan template..
    ('X Pixels', 'long'),
    ('Y Pixels', 'long'),
    ('X Pixel Length', 'float'),  # mm
    ('Y Pixel Length', 'float'),  # mm
    ('Record Length', 'long'),
    ('Total', 'long'),
    ('Starting Line', 'long'),
    ('IP Number', 'long'),
    ('Photomultiplier Ratio', 'float'),
    ('Fade Time (to start of read)', 'float'),
    ('Fade Time (to end of read)', 'float'),  # good that they thought of this, but is it applied?
    ('Host Type/Endian', 10),
    ('IP Type', 10),
    ('Horizontal Scan', 'long'),  # 0=left->Right, 1=Rigth->Left
    ('Vertical Scan', 'long'),  # 0=down->up, 1=up->down
    ('Front/Back Scan', 'long'),  # 0=front, 1=back
    ('Pixel Shift (RAXIS V)', 'float'),
    ('Even/Odd Intensity Ratio (RAXIS V)', 'float'),
    ('Magic number', 'long'),  # 'RAPID'-specific
    ('Number of Axes', 'long'),
    ('Goniometer Vector ax.1.1', 'float'),
    ('Goniometer Vector ax.1.2', 'float'),
    ('Goniometer Vector ax.1.3', 'float'),
    ('Goniometer Vector ax.2.1', 'float'),
    ('Goniometer Vector ax.2.2', 'float'),
    ('Goniometer Vector ax.2.3', 'float'),
    ('Goniometer Vector ax.3.1', 'float'),
    ('Goniometer Vector ax.3.2', 'float'),
    ('Goniometer Vector ax.3.3', 'float'),
    ('Goniometer Vector ax.4.1', 'float'),
    ('Goniometer Vector ax.4.2', 'float'),
    ('Goniometer Vector ax.4.3', 'float'),
    ('Goniometer Vector ax.5.1', 'float'),
    ('Goniometer Vector ax.5.2', 'float'),
    ('Goniometer Vector ax.5.3', 'float'),
    ('Goniometer Start ax.1', 'float'),
    ('Goniometer Start ax.2', 'float'),
    ('Goniometer Start ax.3', 'float'),
    ('Goniometer Start ax.4', 'float'),
    ('Goniometer Start ax.5', 'float'),
    ('Goniometer End ax.1', 'float'),
    ('Goniometer End ax.2', 'float'),
    ('Goniometer End ax.3', 'float'),
    ('Goniometer End ax.4', 'float'),
    ('Goniometer End ax.5', 'float'),
    ('Goniometer Offset ax.1', 'float'),
    ('Goniometer Offset ax.2', 'float'),
    ('Goniometer Offset ax.3', 'float'),
    ('Goniometer Offset ax.4', 'float'),
    ('Goniometer Offset ax.5', 'float'),
    ('Goniometer Scan Axis', 'long'),
    ('Axes Names', 40),
    ('file', 16),
    ('cmnt', 20),
    ('smpl', 20),
    ('iext', 'long'),
    ('reso', 'long'),
    ('save', 'long'),
    ('dint', 'long'),
    ('byte', 'long'),
    ('init', 'long'),
    ('ipus', 'long'),
    ('dexp', 'long'),
    ('expn', 'long'),
    ('posx', 20),
    ('posy', 20),
    ('xray', 'long'),
    # more values can be added here
    ('Header Leftovers', -1)
])


class RaxisImage(FabioImage):
    """
    FabIO image class to read Rigaku RAXIS image files.
    Write functions are not planned as there are plenty of more suitable
    file formats available for storing detector data.
    In particular, the MSB used in Rigaku files is used in an uncommon way:
    it is used as a *multiply-by* flag rather than a normal image value bit.
    While it is said to multiply by the value specified in the header, there
    is at least one case where this is found not to hold, so YMMV and be careful.
    """

    DESCRIPTION = "Rigaku RAXIS file format"

    DEFAULT_EXTENSIONS = ["img"]

    def __init__(self, *arg, **kwargs):
        """
        Generic constructor
        """
        FabioImage.__init__(self, *arg, **kwargs)
        self.bytecode = 'uint16'  # same for all RAXIS images AFAICT
        self.bpp = 2
        self.endianness = '>'  # this may be tested for.

    def swap_needed(self):
        """not sure if this function is needed"""
        # Returns True/False when self.endianness is '<' or '>';
        # NOTE(review): falls through to an implicit None otherwise.
        endian = self.endianness
        # Decide if we need to byteswap
        if (endian == '<' and numpy.little_endian) or (endian == '>' and not numpy.little_endian):
            return False
        if (endian == '>' and numpy.little_endian) or (endian == '<' and not numpy.little_endian):
            return True

    def _readheader(self, infile):
        """
        Read and decode the header of a Rigaku RAXIS image.
        The Rigaku format uses a block of (at least) 1400 bytes for storing
        information. The information has a fixed structure, but endianness
        can be flipped for non-char values.
        Header items which are not capitalised form part of a non-standardized
        data block and may not be accurate.

        TODO: It would be useful to have an automatic endianness test in here.

        :param infile: Opened python file (can be stringIO or bzipped file)
        """
        endianness = self.endianness
        # list of header key to keep the order (when writing)
        self.header = self.check_header()
        # swapBool=False
        fs = endianness
        minHeaderLength = 1400  # from rigaku's def
        # if (numpy.little_endian and endianness=='>'):
        #     swapBool=True
        # file should be open already
        # fh=open(filename,'rb')
        infile.seek(0)  # hopefully seeking works.
        rawHead = infile.read(minHeaderLength)
        # fh.close() #don't like open files in case of intermediate crash
        curByte = 0
        # Walk the fixed field table, unpacking each field sequentially.
        for key, kind in RIGAKU_KEYS.items():
            if isinstance(kind, int):
                # read a number of bytes, convert to char.
                # if -1, read remainder of header
                if kind == -1:
                    rByte = len(rawHead) - curByte
                    self.header[key] = struct.unpack(fs + str(rByte) + 's',
                                                     rawHead[curByte: curByte + rByte])[0]
                    curByte += rByte
                    break
                rByte = kind
                self.header[key] = struct.unpack(fs + str(rByte) + 's',
                                                 rawHead[curByte: curByte + rByte])[0]
                curByte += rByte
            elif kind == 'float':
                # read a float, 4 bytes
                rByte = 4
                self.header[key] = struct.unpack(fs + 'f',
                                                 rawHead[curByte: curByte + rByte])[0]
                curByte += rByte
            elif kind == 'long':
                # read a long, 4 bytes
                rByte = 4
                self.header[key] = struct.unpack(fs + 'l',
                                                 rawHead[curByte: curByte + rByte])[0]
                curByte += rByte
            else:
                logger.warning('special header data type %s not understood', kind)
            if len(rawHead) == curByte:
                # "end reached"
                break

    def read(self, fname, frame=None):
        """
        try to read image

        :param fname: name of the file
        :param frame:
        """
        self.resetvals()
        infile = self._open(fname, 'rb')
        offset = -1  # read from EOF backward
        self._readheader(infile)
        # we read the required bytes from the end of file, using code
        # lifted from binaryimage
        # read the image data
        self.dim1 = self.header['X Pixels']
        self.dim2 = self.header['Y Pixels']
        self.bytecode = numpy.uint16
        dims = [self.dim2, self.dim1]
        size = dims[0] * dims[1] * self.bpp
        if offset >= 0:
            infile.seek(offset)
        else:
            try:
                attrs = dir(infile)
                if "measure_size" in attrs:  # Handle specifically gzip
                    infile.seek(infile.measure_size() - size)  # seek from EOF backwards
                elif "size" in attrs:
                    infile.seek(infile.size - size)  # seek from EOF backwards
                # NOTE(review): 'if' (not 'elif') — when both "size" and "len"
                # are present this seeks twice; confirm intent.
                if "len" in attrs:
                    infile.seek(infile.len - size)  # seek from EOF backwards
                else:
                    infile.seek(-size + offset + 1, os.SEEK_END)  # seek from EOF backwards
                # infile.seek(-size + offset + 1 , os.SEEK_END) #seek from EOF backwards
            except IOError as error:
                logger.warning('expected datablock too large, please check bytecode settings: %s, IOError: %s' % (self.bytecode, error))
            except Exception as error:
                logger.error('Uncommon error encountered when reading file: %s' % error)
        rawData = 
infile.read(size) if self.swap_needed(): data = numpy.fromstring(rawData, self.bytecode).byteswap().reshape(tuple(dims)) else: data = numpy.fromstring(rawData, self.bytecode).reshape(tuple(dims)) # print(data) di = (data >> 15) != 0 # greater than 2^15 if di.sum() >= 1: # find indices for which we need to do the correction (for which # the 16th bit is set): logger.debug("Correct for PM: %s" % di.sum()) data = data << 1 >> 1 # reset bit #15 to zero self.bytecode = numpy.uint32 data = data.astype(self.bytecode) # Now we do some fixing for Rigaku's refusal to adhere to standards: sf = self.header['Photomultiplier Ratio'] # multiply by the ratio defined in the header # data[di] *= sf data[di] = (sf * data[di]).astype(numpy.uint32) self.bpp = numpy.dtype(self.bytecode).itemsize self.data = data return self def rigakuKeys(self): # returns dict of keys and keyLengths RKey = RIGAKU_KEYS orderList = list(RIGAKU_KEYS.keys()) return RKey, orderList raxisimage = RaxisImage fabio-0.6.0/fabio/jpegimage.py0000644001611600070440000000651713227357030017305 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: FabIO X-ray image reader # # Copyright (C) 2010-2016 European Synchrotron Radiation Facility # Grenoble, France # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # """ FabIO class for dealing with JPEG images. """ from __future__ import with_statement, print_function, division __authors__ = ["Valentin Valls"] __date__ = "27/07/2017" __license__ = "MIT" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" __status__ = "stable" import logging logger = logging.getLogger(__name__) try: from PIL import Image except ImportError: Image = None from .fabioimage import FabioImage from .utils import pilutils # List of reserved keys reached from # http://pillow.readthedocs.io/en/3.4.x/handbook/image-file-formats.html#jpeg JPEG_RESERVED_HEADER_KEYS = [ "jfif", "jfif_version", "jfif_density", "jfif_unit", "dpi", "adobe", "adobe_transform", "progression", "icc_profile", "exif", "quality", "optimize", "progressive", "dpi", "exif", "subsampling", "qtables" ] class JpegImage(FabioImage): """ Images in JPEG format using PIL """ DESCRIPTION = "JPEG format" DEFAULT_EXTENSIONS = ["jpg", "jpeg"] RESERVED_HEADER_KEYS = JPEG_RESERVED_HEADER_KEYS _need_a_seek_to_read = True def __init__(self, *args, **kwds): """ Tifimage constructor adds an nbits member attribute """ self.nbits = None FabioImage.__init__(self, *args, **kwds) def _readWithPil(self, filename, infile): try: infile.seek(0) self.pilimage = Image.open(infile) except Exception: infile.seek(0) raise IOError("Error in opening %s with PIL" % filename) data = pilutils.get_numpy_array(self.pilimage) self.data = data if self.pilimage and self.pilimage.info: for k, v in self.pilimage.info.items(): 
self.header[k] = v def read(self, filename, frame=None): infile = self._open(filename, "rb") self.data = None if Image is not None: self._readWithPil(filename, infile) if self.data is None: infile.seek(0) raise IOError("Error in opening %s." % filename) self.resetvals() return self jpegimage = JpegImage fabio-0.6.0/fabio/bruker100image.py0000644001611600070440000004147213227357030020072 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. """Authors: Henning O. 
Sorensen & Erik Knudsen Center for Fundamental Research: Metal Structures in Four Dimensions Risoe National Laboratory Frederiksborgvej 399 DK-4000 Roskilde email:erik.knudsen@risoe.dk Jérôme Kieffer, ESRF, Grenoble, France Sigmund Neher, GWDG, Göttingen, Germany """ # get ready for python3 from __future__ import absolute_import, print_function, with_statement, division __authors__ = ["Henning O. Sorensen", "Erik Knudsen", "Jon Wright", "Jérôme Kieffer", "Sigmund Neher"] __status__ = "production" __copyright__ = "2007-2009 Risoe National Laboratory; 2015-2016 ESRF, 2016 GWDG" __licence__ = "MIT" import numpy import logging import os from math import ceil logger = logging.getLogger(__name__) try: from PIL import Image except ImportError: Image = None from .brukerimage import BrukerImage from .readbytestream import readbytestream from .fabioutils import pad, StringTypes class Bruker100Image(BrukerImage): DESCRIPTION = "SFRM File format used by Bruker detectors (version 100)" DEFAULT_EXTENSIONS = ["sfrm"] bpp_to_numpy = {1: numpy.uint8, 2: numpy.uint16, 4: numpy.int32} version = 100 def __init__(self, data=None, header=None): BrukerImage.__init__(self, data, header) self.nover_one = self.nover_two = 0 def _readheader(self, infile): """ The bruker format uses 80 char lines in key : value format In the first 512*5 bytes of the header there should be a HDRBLKS key, whose value denotes how many 512 byte blocks are in the total header. The header is always n*5*512 bytes, otherwise it wont contain whole key: value pairs """ line = 80 blocksize = 512 nhdrblks = 5 # by default we always read 5 blocks of 512 self.__headerstring__ = infile.read(blocksize * nhdrblks).decode("ASCII") self.header = self.check_header() for i in range(0, nhdrblks * blocksize, line): if self.__headerstring__[i: i + line].find(":") > 0: key, val = self.__headerstring__[i: i + line].split(":", 1) key = key.strip() # remove the whitespace (why?) 
val = val.strip() if key in self.header: # append lines if key already there self.header[key] = self.header[key] + os.linesep + val else: self.header[key] = val # we must have read this in the first 5*512 bytes. nhdrblks = int(self.header['HDRBLKS']) self.header['HDRBLKS'] = nhdrblks # Now read in the rest of the header blocks, appending self.__headerstring__ += infile.read(blocksize * (nhdrblks - 5)).decode("ASCII") for i in range(5 * blocksize, nhdrblks * blocksize, line): if self.__headerstring__[i: i + line].find(":") > 0: # as for first 512 bytes of header key, val = self.__headerstring__[i: i + line].split(":", 1) key = key.strip() val = val.strip() if key in self.header: self.header[key] = self.header[key] + os.linesep + val else: self.header[key] = val # set the image dimensions self.dim1 = int(self.header['NROWS'].split()[0]) self.dim2 = int(self.header['NCOLS'].split()[0]) self.version = int(self.header.get('VERSION', "100")) def toPIL16(self, filename=None): if not Image: raise RuntimeError("PIL is not installed !!! ") if filename: self.read(filename) PILimage = Image.frombuffer("F", (self.dim1, self.dim2), self.data, "raw", "F;16", 0, -1) return PILimage def read(self, fname, frame=None): '''data is stored in three blocks: data (uint8), overflow (uint32), underflow (int32). The blocks are zero paded to a multiple of 16 bits ''' with self._open(fname, "rb") as infile: self._readheader(infile) rows = self.dim1 cols = self.dim2 npixelb = int(self.header['NPIXELB'][0]) # you had to read the Bruker docs to know this! # We are now at the start of the image - assuming bruker._readheader worked # Get image block size from NPIXELB. # The total size is nbytes * nrows * ncolumns. 
self.data = readbytestream(infile, infile.tell(), rows, cols, npixelb, datatype="int", signed='n', swap='n') # now process the overflows for k, nover in enumerate(self.header['NOVERFL'].split()): if k == 0: # read the set of "underflow pixels" - these will be completely disregarded for now continue nov = int(nover) if nov <= 0: continue bpp = 1 << k # (2 ** k) datatype = self.bpp_to_numpy[bpp] # upgrade data type self.data = self.data.astype(datatype) # pad nov*bpp to a multiple of 16 bytes nbytes = (nov * bpp + 15) & ~(15) # Multiple of 16 just above data_str = infile.read(nbytes) # ar without zeros ar = numpy.fromstring(data_str[:nov * bpp], datatype) # insert the the overflow pixels in the image array: lim = (1 << (8 * k)) - 1 # generate an array comprising of the indices into data.ravel() # where its value equals lim. flat = self.data.ravel() mask = numpy.where(flat >= lim)[0] # now put values from ar into those indices if k != 0: flat.put(mask, ar) else: # only working because nov = - is treated bevor self.ar_underflows = ar logger.debug("%s bytes read + %d bytes padding" % (nov * bpp, nbytes - nov * bpp)) # infile.close() # replace zeros with values from underflow block if int(self.header["NOVERFL"].split()[0]) > 0: flat = self.data.ravel() self.mask_undeflows = numpy.where(flat == 0)[0] self.mask_no_undeflows = numpy.where(self.data != 0) flat.put(self.mask_undeflows, self.ar_underflows) # add basline if int(self.header["NOVERFL"].split()[0]) != -1: baseline = int(self.header["NEXP"].split()[2]) self.data[self.mask_no_undeflows] += baseline # print(self.data.max(), self.data.min(), self.data[numpy.where(self.data==0)].shape) self.resetvals() return self def gen_header(self): """ Generate headers (with some magic and guesses) format is Bruker100 """ headers = [] for key in self.HEADERS_KEYS: if key in self.header: value = self.header[key] if key == "CFR": line = key.ljust(4) + ":" else: line = key.ljust(7) + ":" if type(value) in StringTypes: if key == 
'NOVERFL': line += str(str(self.nunderFlows).ljust(24, ' ') + str(self.nover_one).ljust(24) + str(self.nover_two)) elif key == "DETTYPE": line += str(value) elif key == "CFR": line += str(value) elif os.linesep in value: lines = value.split(os.linesep) for i in lines[:-1]: headers.append((line + str(i)).ljust(80, " ")) line = key.ljust(7) + ":" line += str(lines[-1]) elif len(value) < 72: line += str(value) else: for i in range(len(value) // 72): headers.append((line + str(value[72 * i:72 * (i + 1)]))) line = key.ljust(7) + ":" line += value[72 * (i + 1):] elif "__len__" in dir(value): f = "\%.%is" % 72 // len(value) - 1 line += " ".join([f % i for i in value]) else: line += str(value) headers.append(line.ljust(80, " ")) header = "".join(headers) if len(header) > 512 * self.header["HDRBLKS"]: tmp = ceil(len(header) / 512.0) self.header["HDRBLKS"] = int(ceil(tmp / 5.0) * 5.0) for i in range(len(headers)): if headers[i].startswith("HDRBLKS"): headers[i] = ("HDRBLKS:%s" % self.header["HDRBLKS"]).ljust(80, " ") else: self.header["HDRBLKS"] = 15 res = pad("".join(headers), self.SPACER + "." * 78, 512 * int(self.header["HDRBLKS"])) return res def gen_overflow(self): """ Generate an overflow table, including the underflow, marked as 65535 . 
""" bpp = 2 limit = 255 # noverf = int(self.header['NOVERFL'].split()[1]) noverf = self.noverf read_bytes = (noverf * bpp + 15) & ~(15) # since data b dif2usedbyts = read_bytes - (noverf * bpp) pad_zeros = numpy.zeros(dif2usedbyts / bpp).astype(self.bpp_to_numpy[bpp]) flat = self.data.ravel() # flat memory view flow_pos = numpy.logical_or(flat >= limit, flat < 0) # flow_pos_indexes = numpy.where(flow_pos)[0] flow_vals = (flat[flow_pos]) flow_vals[flow_vals < 0] = 65535 # limit#flow_vals[flow_vals<0] flow_vals_paded = numpy.hstack((flow_vals, pad_zeros)).astype(self.bpp_to_numpy[bpp]) return flow_vals_paded # pad(overflow, ".", 512) def gen_underflow100(self): """ Generate an underflow table """ bpp = 4 noverf = int(self.header['NOVERFL'].split()[2]) # nunderf = self.nunderf read_bytes = (noverf * bpp + 15) & ~(15) dif2usedbyts = read_bytes - (noverf * bpp) pad_zeros = numpy.zeros(dif2usedbyts / bpp).astype(self.bpp_to_numpy[bpp]) flat = self.data.ravel() # flat memory view underflow_pos = numpy.where(flat < 0)[0] underflow_val = flat[underflow_pos] underflow_val = underflow_val.astype(self.bpp_to_numpy[bpp]) nderflow_val_paded = numpy.hstack((underflow_val, pad_zeros)) return nderflow_val_paded def write(self, fname): """ Write a bruker image """ if numpy.issubdtype(self.data.dtype, float): if "LINEAR" in self.header: try: slope, offset = self.header["LINEAR"].split(None, 1) slope = float(slope) offset = float(offset) except Exception: logger.warning("Error in converting to float data with linear parameter: %s" % self.header["LINEAR"]) slope, offset = 1.0, 0.0 else: offset = self.data.min() max_data = self.data.max() max_range = 2 ** 24 - 1 # similar to the mantissa of a float32 if max_data > offset: slope = (max_data - offset) / float(max_range) else: slope = 1.0 tmp_data = numpy.round(((self.data - offset) / slope)).astype(numpy.uint32) self.header["LINEAR"] = "%s %s" % (slope, offset) else: if int(self.header["NOVERFL"].split()[0]) > 0: baseline = 
int(self.header["NEXP"].split()[2]) self.data[self.mask_no_undeflows] -= baseline tmp_data = self.data minusMask = numpy.where(tmp_data < 0) bpp = self.calc_bpp(tmp_data) # self.basic_translate(fname) limit = 2 ** (8 * bpp) - 1 data = tmp_data.astype(self.bpp_to_numpy[bpp]) reset = numpy.where(tmp_data >= limit) self.nunderFlows = int(self.header["NOVERFL"].split()[0]) self.nover_one = len(reset[0]) + len(minusMask[0]) self.nover_two = len(minusMask[0]) data[reset] = limit data[minusMask] = limit if not numpy.little_endian and bpp > 1: # Bruker enforces little endian data.byteswap(True) with self._open(fname, "wb") as bruker: bruker.write(self.gen_header().encode("ASCII")) bruker.write(data.tostring()) overflows_one_byte = self.overflows_one_byte() overflows_two_byte = self.overflows_two_byte() if int(self.header["NOVERFL"].split()[0]) > 0: underflows = self.underflows() bruker.write(underflows.tostring()) bruker.write(overflows_one_byte.tostring()) bruker.write(overflows_two_byte.tostring()) def underflows(self): """ Generate underflow table """ bpp = 1 # limit = 255 nunderFlows = self.nunderFlows # temp_data = self.data read_bytes = (nunderFlows * bpp + 15) & ~(15) # multiple of 16 dif2usedbyts = read_bytes - (nunderFlows * bpp) pad_zeros = numpy.zeros(dif2usedbyts / bpp).astype(self.bpp_to_numpy[bpp]) # flat = self.data.ravel() # flat memory view # flow_pos_indexes = self.mask_undeflows flow_vals = (self.ar_underflows) # flow_vals[flow_vals<0] = 65535#limit#flow_vals[flow_vals<0] flow_vals_paded = numpy.hstack((flow_vals, pad_zeros)).astype(self.bpp_to_numpy[bpp]) return flow_vals_paded # pad(overflow, ".", 512) def overflows_one_byte(self): """ Generate one-byte overflow table """ bpp = 2 limit = 255 nover_one = self.nover_one # temp_data = self.data read_bytes = (nover_one * bpp + 15) & ~(15) # multiple of 16 dif2usedbyts = read_bytes - (nover_one * bpp) pad_zeros = numpy.zeros(dif2usedbyts // bpp, dtype=self.bpp_to_numpy[bpp]) flat = self.data.ravel() # flat 
memory view flow_pos = (flat >= limit) + (flat < 0) # flow_pos_indexes = numpy.where(flow_pos == True)[0] flow_vals = (flat[flow_pos]) flow_vals[flow_vals < 0] = 65535 # limit#flow_vals[flow_vals<0] # print("flow_vals",flow_vals) flow_vals_paded = numpy.hstack((flow_vals, pad_zeros)).astype(self.bpp_to_numpy[bpp]) return flow_vals_paded # pad(overflow, ".", 512) def overflows_two_byte(self): """ Generate two byte overflow table """ bpp = 4 noverf = int(self.header['NOVERFL'].split()[2]) # nover_two = self.nover_two read_bytes = (noverf * bpp + 15) & ~(15) # multiple of 16 dif2usedbyts = read_bytes - (noverf * bpp) pad_zeros = numpy.zeros(dif2usedbyts // bpp, dtype=self.bpp_to_numpy[bpp]) flat = self.data.ravel() # flat memory view underflow_pos = numpy.where(flat < 0)[0] underflow_val = flat[underflow_pos] # [::-1] # underflow_val[underflow_val 0] = 65535#limit#flow_vals[flow_vals<0] underflow_val = underflow_val.astype(self.bpp_to_numpy[bpp]) nderflow_val_paded = numpy.hstack((underflow_val, pad_zeros)) return nderflow_val_paded # pad(overflow, ".", 512) bruker100image = Bruker100Image fabio-0.6.0/fabio/GEimage.py0000644001611600070440000003106413227357030016646 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial 
portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # # # Reads the header from a GE a-Si Angio Detector # Using version 8001 of the header from file: # c:\adept\core\DefaultImageInfoConfig.csv # # Antonino Miceli # Thu Jan 4 13:46:31 CST 2007 # # modifications by Jon Wright for style, pychecker and fabio # # Get ready for python3: from __future__ import with_statement, print_function, division __authors__ = ["Antonino Miceli", "Jon Wright", "Jérôme Kieffer"] __date__ = "25/07/2017" __status__ = "production" __copyright__ = "2007 APS; 2010-2015 ESRF" __licence__ = "MIT" import numpy import struct import logging logger = logging.getLogger(__name__) from .fabioimage import FabioImage from .fabioutils import next_filename, previous_filename GE_HEADER_INFO = [ # Name, length in bytes, format for struct (None means string) ('ImageFormat', 10, None), ('VersionOfStandardHeader', 2, ' self.nframes or img_num < 0): raise Exception("Bad image number") imgstart = self.header['StandardHeaderSizeInBytes'] + \ self.header['UserHeaderSizeInBytes'] + \ img_num * self.header['NumberOfRowsInFrame'] * \ self.header['NumberOfColsInFrame'] * \ self.header['ImageDepthInBits'] // 8 # whence = 0 means seek from start of file filepointer.seek(imgstart, 0) self.bpp = self.header['ImageDepthInBits'] // 8 # hopefully 2 imglength = self.header['NumberOfRowsInFrame'] * \ self.header['NumberOfColsInFrame'] * self.bpp if self.bpp != 2: logger.warning("Using uint16 for GE but seems to be wrong, bpp=%s" % self.bpp) data = 
numpy.fromstring(filepointer.read(imglength), numpy.uint16) if not numpy.little_endian: data.byteswap(True) data.shape = (self.header['NumberOfRowsInFrame'], self.header['NumberOfColsInFrame']) self.data = data self.dim2, self.dim1 = self.data.shape self.currentframe = int(img_num) self._makeframename() def getframe(self, num): """ Returns a frame as a new FabioImage object """ if num < 0 or num > self.nframes: raise Exception("Requested frame number is out of range") # Do a deep copy of the header to make a new one newheader = {} for k in self.header.keys(): newheader[k] = self.header[k] frame = GeImage(header=newheader) frame.nframes = self.nframes frame.sequencefilename = self.sequencefilename infile = frame._open(self.sequencefilename, "rb") frame._readframe(infile, num) infile.close() return frame def next(self): """ Get the next image in a series as a fabio image """ if self.currentframe < (self.nframes - 1) and self.nframes > 1: return self.getframe(self.currentframe + 1) else: newobj = GeImage() newobj.read(next_filename( self.sequencefilename)) return newobj def previous(self): """ Get the previous image in a series as a fabio image """ if self.currentframe > 0: return self.getframe(self.currentframe - 1) else: newobj = GeImage() newobj.read(previous_filename( self.sequencefilename)) return newobj def demo(): import sys import time if len(sys.argv) < 2: print("USAGE: GE_script.py ") sys.exit() image_file = sys.argv[1] print("init read_GEaSi_data class and load header..") sequence1 = GeImage() sequence1.read(image_file) print("TimeBetweenFramesInMicrosecs = ") print(sequence1.header['TimeBetweenFramesInMicrosecs']) print("AcquisitionTime = ") print(sequence1.header['AcquisitionTime']) print("Mean = ", sequence1.data.ravel().mean()) while 1: start = time.time() sequence1 = sequence1.next() duration = time.time() - start print(sequence1.currentframe, sequence1.data.ravel().mean(), duration) GEimage = GeImage if __name__ == '__main__': demo() 
fabio-0.6.0/fabio/app/0000755001611600070440000000000013227375744015566 5ustar kiefferscisoft00000000000000fabio-0.6.0/fabio/app/__init__.py0000644001611600070440000000000013227357030017651 0ustar kiefferscisoft00000000000000fabio-0.6.0/fabio/app/viewer.py0000755001611600070440000020342513227357030017436 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fast Azimuthal integration # https://github.com/silx-kit/pyFAI # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Authors: Gael Goret # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # . # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # . # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ Portable diffraction images viewer/converter * Written in Python, it combines the functionalities of the I/O library fabIO with a user friendly Qt GUI. * Image converter is also a light viewer based on the visualization tool provided by the module matplotlib. 
""" from __future__ import absolute_import, with_statement, print_function __version__ = "1.0" __author__ = u"Gaël Goret, Jérôme Kieffer" __copyright__ = "2015 ESRF" __licence__ = "GPL" import sys import os import time from . import _qt as qt from ._matplotlib import FigureCanvasQTAgg from ._matplotlib import NavigationToolbar2QT from matplotlib.figure import Figure import numpy numpy.seterr(divide='ignore') import fabio from fabio.nexus import Nexus from fabio.third_party.argparse import ArgumentParser output_format = ['*.bin', '*.cbf', '*.edf', '*.h5', '*.img', '*.mar2300', '*.mar3450', '*.marccd', '*.tiff', "*.sfrm"] class AppForm(qt.QMainWindow): def __init__(self, parent=None): # Main window qt.QMainWindow.__init__(self, parent) self.setWindowTitle('FabIO Viewer') self.setSizePolicy(qt.QSizePolicy().Expanding, qt.QSizePolicy().Expanding) # Menu and widget self.create_menu() self.create_main_frame() self.create_status_bar() # Active Data self.data = numpy.array([]) self.header = [] # Data Series self.imgDict = {} self.data_series = [] self.header_series = [] self.sequential_file_list = [] self.sequential_file_dict = {} # Miscellaneous self.mask = None self.transform_data_series = False self.transform_list = [] self.sequential_file_mode = False self.h5_loaded = False self.counter_format = '%03d' def format_header(self, d): """ :param d: dict containing headers :return: formated string """ keys = list(d.keys()) keys.sort() res = " \n".join(['%s: %s' % (k, d[k]) for k in keys]) + " \n" return res def _open(self, filename): """Returns a fabio image if the file can be loaded, else display a dialog to help decoding the file. Else return None.""" try: img = fabio.open(filename) except Exception as _: message = 'Automatic format recognition procedure failed or '\ 'pehaps you are trying to open a binary data block...\n\n'\ 'Switch to manual procedure.' 
# NOTE(review): tail of a raw-binary fallback handler whose ``def`` line lies
# before this chunk -- BinDialog asks the user for the raw block geometry.
qt.QMessageBox.warning(self, 'Message', message)
dial = BinDialog(self)
dim1, dim2, offset, bytecode, endian = dial.exec_()
if dim1 is not None and dim2 is not None:
    # map the dialog's human-readable endianness to numpy byte-order chars
    if endian == 'Short':
        endian = '<'
    else:
        endian = '>'
    img = fabio.binaryimage.binaryimage()
    img.read(filename, dim1, dim2, offset, bytecode, endian)
    img.header = {'Info': 'No header information available in binary data blocks'}
else:
    # dialog cancelled: nothing to load
    return
return img

def open_data_series(self, series=None):
    """Load a list of image files (dialog when *series* is None) into the
    viewer, either eagerly (direct mode) or lazily (sequential mode)."""
    if not series:
        series = qt.QFileDialog.getOpenFileNames(self, 'Select and open series of files')
        if isinstance(series, tuple):
            # PyQt5 compatibility: (files, selected_filter)
            series = series[0]
    series = [str(f) for f in list(series)]
    total = len(series)
    if len(series) != 0:
        # reset all per-series state before loading the new one
        self.data_series = []
        self.header_series = []
        self.sequential_file_list = []
        iid = 0
        self.imgDict = {}
        self.sequential_file_dict = {}
        self.images_list.clear()
        self.imagelistWidget.clear()
        self.headerTextEdit.clear()
        self.axes.clear()
        self.canvas.draw()
        self.h5_loaded = False
        for fname in series:
            if fname:
                extract_fname = self.extract_fname_from_path(fname)
                if self.sequential_file_mode:
                    # sequential mode: only remember the path, do not read data now
                    self.statusBar().showMessage('Adding path %s to batch image list, please wait...' % fname)
                    self.log.appendPlainText('Adding path %s to batch image list' % fname)
                    qt.QCoreApplication.processEvents()
                    self.imagelistWidget.addItem(extract_fname)
                    self.sequential_file_list += [extract_fname]
                    self.sequential_file_dict[extract_fname] = fname
                    iid += 1
                else:
                    # direct mode: open now and keep every frame in memory
                    self.statusBar().showMessage('Opening file %s, please wait...' % fname)
                    self.log.appendPlainText('Opening file %s' % fname)
                    qt.QCoreApplication.processEvents()
                    img = self._open(fname)
                    if img is None:
                        continue
                    if img.nframes > 1:
                        # multi-frame container: register one entry per frame
                        for img_idx in range(img.nframes):
                            frame = img.getframe(img_idx)
                            self.data_series.append(frame.data[:])
                            self.header_series.append(frame.header.copy())
                            frame_name = "%s # %i" % (extract_fname, img_idx)
                            self.images_list.addItem(frame_name)
                            self.imagelistWidget.addItem(frame_name)
                            self.imgDict[frame_name] = iid
                            self.sequential_file_list += [frame_name]
                            self.sequential_file_dict[frame_name] = fname
                            iid += 1
                    else:
                        self.data_series.append(img.data[:])
                        self.header_series.append(img.header.copy())
                        extract_fname = self.extract_fname_from_path(fname)
                        self.images_list.addItem(extract_fname)
                        self.imagelistWidget.addItem(extract_fname)
                        self.imgDict[extract_fname] = iid
                        self.sequential_file_list += [extract_fname]
                        self.sequential_file_dict[extract_fname] = fname
                        iid += 1
            self.progressBar.setValue(float(iid + 1) / (total) * 100.)
        self.statusBar().clearMessage()
        self.progressBar.setValue(0)
        self.log.appendPlainText('Opening procedure: Complete')
        if self.data_series:
            # show the first loaded image
            self.select_new_image(None, imgID=0)

def open_h5_data_series(self):
    """Load every image of an HDF5/Nexus file into the viewer."""
    # TODO batch mode compatibility
    fname = qt.QFileDialog.getOpenFileName(self, 'Select and open series of files')
    if isinstance(fname, tuple):
        # PyQt5 compatibility: (file, selected_filter)
        fname = fname[0]
    fname = str(fname)
    self.h5_loaded = True
    if self.filecheckBox.checkState():
        # sequential mode cannot be combined with hdf5 input: force it off,
        # temporarily disconnecting the signal to avoid re-entrancy
        self.filecheckBox.stateChanged.disconnect()
        self.filecheckBox.setCheckState(False)
        self.sequential_file_mode = False
        self.filecheckBox.stateChanged.connect(self.sequential_option)
        message = 'Sequential file mode is not compatible with hdf5 input file: option removed'
        qt.QMessageBox.warning(self, 'Message', message)
    if fname:
        self.data_series = []
        self.header_series = []
        self.sequential_file_list = []
        self.sequential_file_dict = {}
        self.imagelistWidget.clear()
        self.headerTextEdit.clear()
        with Nexus(fname, 'r') as nxs:
            entry = nxs.get_entries()[0]
            nxdata = nxs.get_class(entry, class_type="NXdata")[0]
            dataset = nxdata.get("data", numpy.zeros(shape=(1, 1, 1)))
            total = dataset.shape[0]
            imgDict = {}
            extract_fname = os.path.basename(os.path.splitext(fname)[0])
            self.images_list.clear()
            safeiid = 0
            for iid in range(total):
                self.progressBar.setValue(((iid + 1.0) / (total)) * 100.)
                self.log.appendPlainText('Extracting data from hdf5 archive, image number %d' % iid)
                qt.QCoreApplication.processEvents()
                self.data_series.append(dataset[iid])
                self.header_series += [{'Info': 'No header information available in hdf5 Archive'}]
                imgDict[extract_fname + str(iid)] = safeiid
                self.images_list.addItem(extract_fname + str(iid))
                safeiid += 1
        self.statusBar().clearMessage()
        self.progressBar.setValue(0)
        self.log.appendPlainText('Hdf5 Extraction: Complete')
        self.imgDict = imgDict.copy()
        if self.data_series:
            self.select_new_image(None, imgID=0)

def extract_fname_from_path(self, name):
    """Return the part of *name* after the last '/' (bare file name)."""
    posslash = name.rfind("/")
    if posslash > -1:
        return name[posslash + 1:]
    else:
        return name

# File-save filter string shared by every save dialog.
# NOTE(review): "marccd image (*.marccd));;" has an unbalanced ')' -- kept
# byte-identical here; it only affects the label shown in the dialog.
defaultSaveFilter = ""\
    "binary data block (*.bin);;"\
    "cbf image (*.cbf);;"\
    "edf image (*.edf);;"\
    "oxford diffraction image (*.img);;"\
    "mar2300 image(*.mar2300);;"\
    "mar3450 image (*.mar3450);;"\
    "marccd image (*.marccd));;"\
    "tiff image (*.tiff);;"\
    "bruker image (*.sfrm)"

def _getSaveFileNameAndFilter(self, parent=None, caption='', directory='', filter=''):
    """Run a save dialog; return (selected_path, selected_filter) or ("", "")
    when the user cancels."""
    dialog = qt.QFileDialog(parent, caption=caption, directory=directory)
    dialog.setAcceptMode(qt.QFileDialog.AcceptSave)
    dialog.setNameFilter(filter)
    result = dialog.exec_()
    if not result:
        return "", ""
    return dialog.selectedFiles()[0], dialog.selectedNameFilter()

def save_as(self):
    """Save/convert the currently displayed image.
    NOTE(review): this method continues in the next chunk."""
    info = self._getSaveFileNameAndFilter(self, "Save active image as",
                                          qt.QDir.currentPath(),
                                          filter=self.tr(self.defaultSaveFilter))
    if self.data.any():
        if str(info[0]) != '' and str(info[1]) != '':
            format_ = self.extract_format_from_string(str(info[1]))
            fname = self.add_extention_if_absent(str(info[0]), format_)
def _save_as_tail(self, info, fname, format_, have_data):
    """NOTE(review): continuation of save_as(), whose ``def`` line and the
    ``if self.data.any():`` test live in the previous chunk.  ``have_data``
    stands in for that test; wrapped only to keep the fragment inert."""
    if have_data:
        self.convert_and_write(fname, format_, self.data, self.header)
    else:
        if str(info[0]) != '' and str(info[1]) != '':
            message = "Could not save image as file if no data have been loaded"
            qt.QMessageBox.warning(self, 'Warning', message)

def save_data_series_as_multiple_file(self):
    """Save/convert the whole data series, one output file per image."""
    info = self._getSaveFileNameAndFilter(self, "Save data series as multiple files",
                                          qt.QDir.currentPath(),
                                          filter=self.tr(self.defaultSaveFilter))
    if self.data_series or self.sequential_file_list:
        if str(info[0]) != '' and str(info[1]) != '':
            format_ = self.extract_format_from_string(str(info[1]))
            # FIX: was ``self.os.path.splitext`` -- there is no ``self.os``
            # attribute, so saving always raised AttributeError.
            fname = os.path.splitext(str(info[0]))[0]
            self.convert_and_write_multiple_files(fname, format_)
    else:
        if str(info[0]) != '' and str(info[1]) != '':
            message = "Could not save image as file if no data have been loaded"
            qt.QMessageBox.warning(self, 'Warning', message)

def save_data_series_as_singlehdf(self):
    """Save the whole data series into a single HDF5 archive."""
    info = self._getSaveFileNameAndFilter(self, "Save data series as single high density file",
                                          qt.QDir.currentPath(),
                                          filter=self.tr("HDF5 archive (*.h5)"))
    if self.data_series or self.sequential_file_list:
        if str(info[0]) != '' and str(info[1]) != '':
            format_ = self.extract_format_from_string(str(info[1]))
            fname = self.add_extention_if_absent(str(info[0]), format_)
            if format_ == '*.h5':
                self.convert_and_save_to_h5(fname)
            else:
                qt.QMessageBox.warning(self, 'Warning', "Unknown format: %s" % format_)
                return
    else:
        if str(info[0]) != '' and str(info[1]) != '':
            message = "Could not save image as file if no data have been loaded"
            qt.QMessageBox.warning(self, 'Warning', message)

def convert_and_save_to_h5(self, fname):
    """Save a stack as Nexus entry (create a new entry in the file each time)."""
    with Nexus(fname) as nxs:
        entry = nxs.new_entry(entry="entry", program_name="fabio_viewer",
                              title="FabIO Viewer")
        nxdata = nxs.new_class(entry, "fabio", class_type="NXdata")
        # Read shape of the first (possibly transformed) image to size the dataset
        if self.sequential_file_mode:
            total = len(self.sequential_file_list)
            tmpfname = self.sequential_file_dict[self.sequential_file_list[0]]
            img = self._open(tmpfname)
            if img is None:
                return
            data = img.data
        else:
            total = len(self.data_series)
            data = self.data_series[0]
        if self.transform_data_series:
            tmpdata = self.apply_queued_transformations(data)
        else:
            tmpdata = data
        shape = tmpdata.shape
        # one chunk per image keeps per-frame reads cheap
        dataset = nxdata.create_dataset("data", shape=(total,) + shape,
                                        dtype=numpy.float32,
                                        chunks=(1,) + shape,
                                        compression="gzip")
        dataset.attrs["interpretation"] = "image"
        dataset.attrs["signal"] = "1"
        if self.sequential_file_mode:
            # images are (re)read from disk one at a time
            for iid, imgkey in enumerate(self.sequential_file_list):
                tmpfname = self.sequential_file_dict[imgkey]
                img = self._open(tmpfname)
                if img is None:
                    continue
                self.progressBar.setValue((float(iid + 1) / (total)) * 100.)
                template = 'Converting and saving file %s. saving file number %d'
                self.log.appendPlainText(template % (tmpfname, iid))
                qt.QCoreApplication.processEvents()
                if self.transform_data_series:
                    tmpdata = self.apply_queued_transformations(img.data)
                else:
                    tmpdata = img.data
                dataset[iid] = tmpdata
        else:
            # images already live in memory
            for iid, data in enumerate(self.data_series):
                self.log.appendPlainText('Saving file number %d' % iid)
                self.progressBar.setValue((float(iid + 1) / (total)) * 100.)
                qt.QCoreApplication.processEvents()
                if self.transform_data_series:
                    tmpdata = self.apply_queued_transformations(data)
                else:
                    tmpdata = data
                dataset[iid] = tmpdata
    # FIX: was ``self.statusBar().clear()`` -- QStatusBar has no ``clear()``;
    # every other method here uses ``clearMessage()``.
    self.statusBar().clearMessage()
    self.progressBar.setValue(0)
    self.log.appendPlainText('Hdf5 Recording: Complete')

def add_extention_if_absent(self, fname, format_):
    """Append the extension of *format_* (e.g. '*.edf') to *fname* unless the
    last path component already contains a dot."""
    posslash = fname.rfind("/")
    posdot = fname.rfind(".")
    if posdot > posslash:
        return fname
    else:
        return fname + format_[1:]

def convert_and_write(self, fname, format_, data, header):
    """Write *data*/*header* to *fname* in the given '*.ext' format.

    :raises Warning: for an unsupported format string.
    """
    if format_ == '*.bin':
        # raw dump: no header, bytes exactly as stored in memory
        with open(fname, mode="wb") as out:
            out.write(data.tostring())
        return
    elif format_ == '*.marccd':
        out = fabio.marccdimage.marccdimage(data=data, header=header)
    elif format_ == '*.edf':
        out = fabio.edfimage.edfimage(data=data, header=header)
    elif format_ == '*.tiff':
        out = fabio.tifimage.tifimage(data=data, header=header)
    elif format_ == '*.cbf':
        out = fabio.cbfimage.cbfimage(data=data, header=header)
    elif format_ in ['*.mar3450', '*.mar2300']:
        # mar345 plates are fixed-size: pad/crop to the plate geometry first
        data = self.padd_mar(data, format_)
        out = fabio.mar345image.mar345image(data=data, header=header)
    elif format_ == '*.img':
        out = fabio.OXDimage.OXDimage(data=data, header=header)
    elif format_ == '*.sfrm':
        out = fabio.brukerimage.brukerimage(data=data, header=header)
    else:
        raise Warning("Unknown format: %s" % format_)
    template = 'Writing file %s to %s format, please wait...'
    self.statusBar().showMessage(template % (fname, format_[2:]))
    self.log.appendPlainText('Writing file %s to %s format' % (fname, format_[2:]))
    qt.QCoreApplication.processEvents()
    out.write(fname)
    self.statusBar().clearMessage()

def convert_and_write_multiple_files(self, fname, format_):
    """Convert every image of the series to *format_*, one file per image.
    NOTE(review): this method continues in the next chunk."""
    if self.sequential_file_mode:
        total = len(self.sequential_file_list)
        ii = 0
        for imgkey in self.sequential_file_list:
            tmpfname = self.sequential_file_dict[imgkey]
            img = self._open(tmpfname)
            if img is None:
                continue
            self.progressBar.setValue((float(ii + 1) / (total)) * 100.)
# --- continuation of convert_and_write_multiple_files() from the previous chunk ---
            self.log.appendPlainText('Converting file %s' % tmpfname)
            qt.QCoreApplication.processEvents()
            if self.transform_data_series:
                tmpdata = self.apply_queued_transformations(img.data)
            else:
                tmpdata = img.data
            # e.g. ('%s_%s%s' % (base, '%04d', '.edf')) % ii -> base_0000.edf
            filename = ('%s_%s%s' % (fname, self.counter_format, format_[1:])) % ii
            self.convert_and_write(filename, format_, tmpdata, img.header)
            ii += 1
    else:
        # direct mode: data and headers already in memory
        total = len(self.data_series)
        for i in range(len(self.data_series)):
            tmpdata = self.data_series[i]
            tmpheader = self.header_series[i]
            tmpfname = ('%s_%s%s' % (fname, self.counter_format, format_[1:])) % i
            self.progressBar.setValue((float(i + 1) / (total)) * 100.)
            self.log.appendPlainText('Converting file %s' % i)
            qt.QCoreApplication.processEvents()
            if self.transform_data_series:
                tmpdata = self.apply_queued_transformations(tmpdata)
            self.convert_and_write(tmpfname, format_, tmpdata, tmpheader)
    self.progressBar.setValue(0)
    self.log.appendPlainText('Convertion to %s: Complete' % format_[2:])

def extract_format_from_string(self, format_long):
    """Return the '*.ext' token contained in a dialog filter string.

    :raises Warning: when no known format is found.
    """
    for fmt in output_format:
        if fmt in format_long:
            return fmt
    raise Warning("Unknown format: %s" % format_long)

def horizontal_mirror(self):
    """Flip image(s) top/bottom (numpy.flipud); queued in sequential mode."""
    if self.transform_data_series:
        if self.sequential_file_mode:
            # sequential: only queue the transformation for later replay
            self.transformation_queue.addItem('horizontal_mirror')
            self.transform_list += ['horizontal_mirror']
            self.log.appendPlainText('Add horizontal mirror to transformations queue')
            qt.QCoreApplication.processEvents()
        else:
            total = len(self.data_series)
            if not total:
                message = "Could not transform image if no data have been loaded"
                qt.QMessageBox.warning(self, 'Warning', message)
                return
            for i in range(len(self.data_series)):
                self.data_series[i] = numpy.flipud(self.data_series[i])[:]
                self.progressBar.setValue((float(i + 1) / (total)) * 100.)
                self.log.appendPlainText('Applying horizontal mirror to data series: image %d' % i)
                qt.QCoreApplication.processEvents()
            iid = self.imgDict[str(self.images_list.currentText())]
            self.select_new_image(None, imgID=iid)
    else:
        # transform only the active image
        if self.data.any():
            self.data = numpy.flipud(self.data)[:]
            iid = self.imgDict[str(self.images_list.currentText())]
            self.data_series[iid] = self.data[:]
            self.log.appendPlainText('Applying horizontal mirror to current data')
            self.on_draw()
        else:
            message = "Could not transform image if no data have been loaded"
            qt.QMessageBox.warning(self, 'Warning', message)
    self.progressBar.setValue(0)

def vertical_mirror(self):
    """Flip image(s) left/right (numpy.fliplr); queued in sequential mode."""
    if self.transform_data_series:
        if self.sequential_file_mode:
            self.transformation_queue.addItem('vertical_mirror')
            self.transform_list += ['vertical_mirror']
            self.log.appendPlainText('Add vertical mirror to transformations queue')
            qt.QCoreApplication.processEvents()
        else:
            total = len(self.data_series)
            if not total:
                message = "Could not transform image if no data have been loaded"
                qt.QMessageBox.warning(self, 'Warning', message)
                return
            for i in range(len(self.data_series)):
                self.data_series[i] = numpy.fliplr(self.data_series[i])[:]
                self.progressBar.setValue((float(i + 1) / (total)) * 100.)
                self.log.appendPlainText('Applying vertical mirror to data series: image %d' % i)
                qt.QCoreApplication.processEvents()
            iid = self.imgDict[str(self.images_list.currentText())]
            self.select_new_image(None, imgID=iid)
    else:
        if self.data.any():
            self.data = numpy.fliplr(self.data)[:]
            iid = self.imgDict[str(self.images_list.currentText())]
            self.data_series[iid] = self.data[:]
            self.log.appendPlainText('Applying vertical mirror to current data')
            self.on_draw()
        else:
            message = "Could not transform image if no data have been loaded"
            qt.QMessageBox.warning(self, 'Warning', message)
    self.progressBar.setValue(0)

def transposition(self):
    """Transpose image(s); queued in sequential mode."""
    if self.transform_data_series:
        if self.sequential_file_mode:
            self.transformation_queue.addItem('transposition')
            self.transform_list += ['transposition']
            self.log.appendPlainText('Add transposition to transformations queue')
            qt.QCoreApplication.processEvents()
        else:
            total = len(self.data_series)
            if not total:
                message = "Could not transform image if no data have been loaded"
                qt.QMessageBox.warning(self, 'Warning', message)
                return
            for i in range(len(self.data_series)):
                self.data_series[i] = self.data_series[i].transpose()[:]
                self.progressBar.setValue((float(i + 1) / (total)) * 100.)
                self.log.appendPlainText('Applying transposition to data series: image %d' % i)
                qt.QCoreApplication.processEvents()
            iid = self.imgDict[str(self.images_list.currentText())]
            self.select_new_image(None, imgID=iid)
    else:
        if self.data.any():
            self.data = self.data.transpose()[:]
            iid = self.imgDict[str(self.images_list.currentText())]
            self.data_series[iid] = self.data[:]
            self.log.appendPlainText('Applying transposition to current data')
            self.on_draw()
        else:
            message = "Could not transform image if no data have been loaded"
            qt.QMessageBox.warning(self, 'Warning', message)
    self.progressBar.setValue(0)

def rotation_90(self):
    """Rotate image(s) +90 degrees counter-clockwise; queued in sequential mode."""
    if self.transform_data_series:
        if self.sequential_file_mode:
            self.transformation_queue.addItem('rotation(+90)')
            self.transform_list += ['rotation(+90)']
            self.log.appendPlainText('Add + 90 rotation to transformations queue')
            qt.QCoreApplication.processEvents()
        else:
            total = len(self.data_series)
            if not total:
                message = "Could not transform image if no data have been loaded"
                qt.QMessageBox.warning(self, 'Warning', message)
                return
            for i in range(len(self.data_series)):
                self.data_series[i] = numpy.rot90(self.data_series[i])[:]
                self.progressBar.setValue((float(i + 1) / (total)) * 100.)
                self.log.appendPlainText('Applying + 90 rotation to data series: image %d' % i)
                qt.QCoreApplication.processEvents()
            iid = self.imgDict[str(self.images_list.currentText())]
            self.select_new_image(None, imgID=iid)
    else:
        if self.data.any():
            self.data = numpy.rot90(self.data)[:]
            iid = self.imgDict[str(self.images_list.currentText())]
            self.data_series[iid] = self.data[:]
            self.log.appendPlainText('Applying + 90 rotation to current data')
            self.on_draw()
        else:
            message = "Could not transform image if no data have been loaded"
            qt.QMessageBox.warning(self, 'Warning', message)
    self.progressBar.setValue(0)

def rotation_180(self):
    """Rotate image(s) 180 degrees (numpy.rot90 twice); queued in sequential mode."""
    if self.transform_data_series:
        if self.sequential_file_mode:
            self.transformation_queue.addItem('rotation(+180)')
            self.transform_list += ['rotation(+180)']
            self.log.appendPlainText('Add + 180 rotation to transformations queue')
            qt.QCoreApplication.processEvents()
        else:
            total = len(self.data_series)
            if not total:
                message = "Could not transform image if no data have been loaded"
                qt.QMessageBox.warning(self, 'Warning', message)
                return
            for i in range(len(self.data_series)):
                self.data_series[i] = numpy.rot90(self.data_series[i], 2)[:]
                self.progressBar.setValue((float(i + 1) / (total)) * 100.)
                self.log.appendPlainText('Applying + 180 rotation to data series: image %d' % i)
                qt.QCoreApplication.processEvents()
            iid = self.imgDict[str(self.images_list.currentText())]
            self.select_new_image(None, imgID=iid)
    else:
        if self.data.any():
            self.data = numpy.rot90(self.data, 2)[:]
            iid = self.imgDict[str(self.images_list.currentText())]
            self.data_series[iid] = self.data[:]
            self.log.appendPlainText('Applying + 180 rotation to current data')
            self.on_draw()
        else:
            message = "Could not transform image if no data have been loaded"
            qt.QMessageBox.warning(self, 'Warning', message)
    self.progressBar.setValue(0)

def rotation_270(self):
    """Rotate image(s) -90 degrees (numpy.rot90 three times); queued in sequential mode."""
    if self.transform_data_series:
        if self.sequential_file_mode:
            self.transformation_queue.addItem('rotation(-90)')
            self.transform_list += ['rotation(-90)']
            self.log.appendPlainText('Add - 90 rotation to transformations queue')
            qt.QCoreApplication.processEvents()
        else:
            total = len(self.data_series)
            if not total:
                message = "Could not transform image if no data have been loaded"
                qt.QMessageBox.warning(self, 'Warning', message)
                return
            for i in range(len(self.data_series)):
                self.data_series[i] = numpy.rot90(self.data_series[i], 3)[:]
                self.progressBar.setValue((float(i + 1) / (total)) * 100.)
                self.log.appendPlainText('Applying - 90 rotation to data series: image %d' % i)
                qt.QCoreApplication.processEvents()
            iid = self.imgDict[str(self.images_list.currentText())]
            self.select_new_image(None, imgID=iid)
    else:
        if self.data.any():
            self.data = numpy.rot90(self.data, 3)[:]
            iid = self.imgDict[str(self.images_list.currentText())]
            self.data_series[iid] = self.data[:]
            self.log.appendPlainText('Applying - 90 rotation to current data')
            self.on_draw()
        else:
            message = "Could not transform image if no data have been loaded"
            qt.QMessageBox.warning(self, 'Warning', message)
    self.progressBar.setValue(0)

def mask(self):
    """Import a boolean mask from a raw binary block and multiply it into
    image(s).  NOTE(review): assigning ``self.mask`` shadows this bound
    method on the instance; the menu keeps working because the slot was
    bound at connect time, but ``self.mask`` later means the array.
    This method continues in the next chunk."""
    message = 'Select and import a boolean mask from binary data block file'
    fname = qt.QFileDialog.getOpenFileName(self, message)
    if isinstance(fname, tuple):
        # PyQt5 compatibility
        fname = fname[0]
    fname = str(fname)
    if fname:
        dial = BinDialog(self)
        dim1, dim2, offset, bytecode, endian = dial.exec_()
        if dim1 is not None and dim2 is not None:
            if endian == 'Short':
                endian = '<'
            else:
                endian = '>'
            img = fabio.binaryimage.binaryimage()
            img.read(fname, dim1, dim2, offset, bytecode, endian)
            self.mask = img.data[:]
            if self.transform_data_series:
                if self.sequential_file_mode:
                    self.transformation_queue.addItem('masking')
                    self.transform_list += ['masking']
                    self.log.appendPlainText('Add masking to transformations queue')
                    qt.QCoreApplication.processEvents()
                else:
                    total = len(self.data_series)
                    if not total:
                        message = "Could not transform image if no data have been loaded"
                        qt.QMessageBox.warning(self, 'Warning', message)
                        return
                    for i in range(len(self.data_series)):
                        if self.data_series[i].shape != self.mask.shape:
                            message = "Mask and image have different shapes, skipping image %d" % i
                            qt.QMessageBox.warning(self, 'Warning', message)
                            self.log.appendPlainText(message)
                        else:
                            self.data_series[i] = self.mask * self.data_series[i]
                        self.progressBar.setValue((float(i + 1) / (total)) * 100.)
def _mask_tail(self, i):
    """NOTE(review): continuation of mask(), whose ``def`` line and if/else
    ladder live in the previous chunk.  Reproduced under placeholder guards
    only to keep the fragment importable; rejoin with the head before use."""
    if True:  # inside: per-image loop of the whole-series branch
        self.log.appendPlainText('Applying mask to data series: image %d' % i)
        qt.QCoreApplication.processEvents()
        iid = self.imgDict[str(self.images_list.currentText())]
        self.select_new_image(None, imgID=iid)
    else:  # active-image-only branch
        if self.data.any():
            self.data = self.mask * self.data
            iid = self.imgDict[str(self.images_list.currentText())]
            self.data_series[iid] = self.data[:]
            self.on_draw()
            message = 'Binary boolean mask loaded and applied'
            self.statusBar().showMessage(message, 2000)
            self.log.appendPlainText(message)
            qt.QCoreApplication.processEvents()
        else:
            message = "Could not transform image if no data have been loaded"
            qt.QMessageBox.warning(self, 'Warning', message)
    self.progressBar.setValue(0)

def apply_queued_transformations(self, data):
    """Replay every queued transformation (in queue order) on *data* and
    return the transformed array.

    :raises Warning: for a queued name without a handler (note: 'downsampling'
        is listed as known but has no branch, so it would raise here).
    """
    transformations = ['horizontal_mirror', 'vertical_mirror', 'transposition',
                       'rotation(+90)', 'rotation(+180)', 'rotation(-90)',
                       'masking', 'downsampling']
    for t in self.transform_list:
        if t in transformations:
            if t == 'horizontal_mirror':
                data = numpy.flipud(data)[:]
                self.log.appendPlainText('horizontal_mirror Done')
            elif t == 'vertical_mirror':
                data = numpy.fliplr(data)[:]
                self.log.appendPlainText('vertical_mirror Done')
            elif t == 'transposition':
                data = data.transpose()[:]
                self.log.appendPlainText('transposition Done')
            elif t == 'rotation(+90)':
                data = numpy.rot90(data)[:]
                self.log.appendPlainText('rotation(+90) Done')
            elif t == 'rotation(+180)':
                data = numpy.rot90(data, 2)[:]
                self.log.appendPlainText('rotation(+180) Done')
            elif t == 'rotation(-90)':
                data = numpy.rot90(data, 3)[:]
                self.log.appendPlainText('rotation(-90) Done')
            elif t == 'masking':
                data = self.mask * data
                self.log.appendPlainText('masking Done')
            else:
                raise Warning('Unknown transformation %s' % t)
    return data

def transformation_options(self):
    """Sync the 'apply to whole series' flag with its menu checkbox."""
    self.transform_data_series = self.transform_option_action.isChecked()

def clear_transform_list(self):
    """Empty the queued-transformations list and its widget."""
    self.transform_list = []
    self.transformation_queue.clear()

def downsample(self):
    """Sum consecutive groups of *thick* images and save each sum as one file
    (optionally writing PHI_START/PHI_END angles for mar formats)."""
    dial = DownSamplingDialog()
    thick, start_angle, step_angle = dial.exec_()
    if thick is not None:
        info = self._getSaveFileNameAndFilter(self, "Save downsampled data series as multiple files",
                                              qt.QDir.currentPath(),
                                              filter=self.tr(self.defaultSaveFilter))
        if self.data_series or self.sequential_file_list:
            if str(info[0]) != '' and str(info[1]) != '':
                format_ = self.extract_format_from_string(str(info[1]))
                # FIX: was ``self.os.path.splitext`` -- no ``self.os`` attribute.
                fname = os.path.splitext(str(info[0]))[0]
                if self.sequential_file_mode:
                    total = len(self.sequential_file_list)
                    # FIX: was ``fabio.open(...)`` -- use the error-handling
                    # helper used everywhere else in this class.
                    img = self._open(self.sequential_file_dict[self.sequential_file_list[0]])
                    if img is None:
                        return
                    stack = numpy.zeros_like(img.data)
                    t0 = time.time()
                    # only complete groups of ``thick`` images are summed
                    subtotal = (total // thick) * thick
                    for i in range(subtotal):
                        j = i % thick   # position inside the current group
                        k = i // thick  # index of the output stack
                        imgkey = self.sequential_file_list[i]
                        tmpfname = self.sequential_file_dict[imgkey]
                        img = self._open(tmpfname)
                        if img is None:
                            continue
                        if img.data.shape != stack.shape:
                            message = "Error image shape: %s summed data shape: %s" % (img.data.shape, stack.shape)
                            self.log.appendPlainText(message)
                            continue
                        numpy.add(stack, img.data, stack)
                        self.progressBar.setValue((float(i + 1) / (subtotal)) * 100.)
                        self.log.appendPlainText('File %s stacked' % imgkey)
                        qt.QCoreApplication.processEvents()
                        if j == thick - 1:
                            # group complete: write the summed stack out
                            self.log.appendPlainText('stack number %d summing up' % k)
                            qt.QCoreApplication.processEvents()
                            if format_ in ['*.mar3450', '*.mar2300']:
                                img.header["PHI_START"] = '%.3f' % (start_angle + step_angle * (i - thick + 1))
                                img.header["PHI_END"] = '%.3f' % (start_angle + step_angle * (i))
                            filename = ('%s_%s%s' % (fname, self.counter_format, format_[1:])) % k
                            self.convert_and_write(filename, format_, stack, img.header)
                            t1 = time.time()
                            print('time: %s' % (t1 - t0))
                            stack = numpy.zeros_like(img.data)
                            t0 = time.time()
                else:
                    total = len(self.data_series)
                    stack = numpy.zeros_like(self.data_series[0])
                    subtotal = (total // thick) * thick
                    for i in range(subtotal):
                        j = i % thick
                        k = i // thick
                        data = self.data_series[i]
                        if data.shape != stack.shape:
                            # FIX: was ``img.data.shape`` -- ``img`` is unbound
                            # in this branch (copy/paste from the sequential one).
                            message = "Error image shape: %s summed data shape: %s" % (data.shape, stack.shape)
                            self.log.appendPlainText(message)
                            continue
                        numpy.add(stack, data, stack)
                        self.progressBar.setValue((float(i + 1) / (subtotal)) * 100.)
                        self.log.appendPlainText('File number %d stacked' % i)
                        qt.QCoreApplication.processEvents()
                        if j == thick - 1:
                            self.log.appendPlainText('stack number %d summing up' % k)
                            qt.QCoreApplication.processEvents()
                            if format_ in ['*.mar3450', '*.mar2300']:
                                self.header_series[i]["PHI_START"] = '%.3f' % (start_angle + step_angle * (i - thick + 1))
                                self.header_series[i]["PHI_END"] = '%.3f' % (start_angle + step_angle * (i))
                            filename = ('%s_%s%s' % (fname, self.counter_format, format_[1:])) % k
                            self.convert_and_write(filename, format_, stack, self.header_series[i])
                            stack = numpy.zeros_like(data)
                self.progressBar.setValue(0)
                self.log.appendPlainText('Downsampling: Complete')
                qt.QCoreApplication.processEvents()
        else:
            if str(info[0]) != '' and str(info[1]) != '':
                message = "Could not save image as file if no data have been loaded"
                qt.QMessageBox.warning(self, 'Warning', message)

def select_new_image(self, name, imgID=None):
    """Make image *imgID* (or the one named *name*) the active image and redraw."""
    if imgID is not None:
        iid = imgID
    else:
        iid = self.imgDict[str(name)]
    self.data = self.data_series[iid]
    self.header = self.header_series[iid]
    self.headerTextEdit.setPlainText(str(self.format_header(self.header)))
    self.on_draw()

def on_pick(self, event):
    """Mouse-motion callback: show pixel coordinates and intensity under cursor."""
    if event.inaxes and self.data.any():
        x = int(round(event.xdata))
        y = int(round(event.ydata))
        if x < self.data.shape[1] and y < self.data.shape[0]:
            i = self.data[y, x]
            self.pix_coords_label.setText("Pixel coordinates and intensity: x =%6d, y =%6d, i =%6g" % (x, y, i))
        else:
            self.pix_coords_label.setText("Pixel coordinates and intensity: x = None , y = None , i = None ")
    else:
        self.pix_coords_label.setText("Pixel coordinates and intensity: x = None , y = None , i = None ")

def on_draw(self):
    """ Redraws the figure.
    NOTE(review): this method continues in the next chunk."""
    self.statusBar().showMessage('Loading display...')
    qt.QCoreApplication.processEvents()
    # clear the axes and redraw a new plot
    self.axes.clear()
    # self.axes.imshow(numpy.log(numpy.clip(self.data,1.0e-12,1.0e260) ),interpolation = 'nearest')
    self.axes.imshow(numpy.log(self.data), interpolation='nearest')
def _on_draw_tail(self):
    """NOTE(review): continuation of on_draw() from the previous chunk;
    wrapped only to keep the fragment inert.  Inverts the y axis so image
    row 0 is displayed at the top."""
    self.axes.set_visible(True)
    if self.axes.get_ylim()[0] < self.axes.get_ylim()[1]:
        self.axes.set_ylim(self.axes.get_ylim()[::-1])
    self.canvas.draw()
    self.statusBar().clearMessage()

def batch_to_view(self):
    """Export the image selected in the Batch tab to the View tab."""
    items = self.imagelistWidget.selectedItems()
    # FIX: guard against an empty selection -- ``items[0]`` raised IndexError
    # when the button was clicked with nothing selected.
    if not items:
        return
    iid = 0
    item = str(items[0].text())
    # hdf archives cannot be re-opened frame-wise from batch mode
    hdfxtens = ['.h5', '.H5', '.hdf', '.HDF', 'hdf5', '.HDF5']
    for xtens in hdfxtens:
        if xtens in item:
            qt.QMessageBox.warning(self, 'Message', "Can't display hdf archive from batch mode ")
            return
    if self.sequential_file_mode:
        # sequential mode: the data was never loaded, read it now
        self.data_series = []
        self.header_series = []
        self.imgDict = {}
        self.images_list.clear()
        self.headerTextEdit.clear()
        self.axes.clear()
        self.canvas.draw()
        self.statusBar().showMessage('Import image %s in the View Mode tab, please wait...' % item)
        self.log.appendPlainText('Import image %s in the View Mode tab' % item)
        qt.QCoreApplication.processEvents()
        fname = self.sequential_file_dict[item]
        extract_fname = os.path.splitext(os.path.basename(fname))[0]
        img = self._open(fname)
        if img is None:
            return
        if img.nframes > 1:
            for img_idx in range(img.nframes):
                frame = img.getframe(img_idx)
                self.data_series.append(frame.data[:])
                self.header_series.append(frame.header.copy())
                frame_name = "%s # %i" % (extract_fname, img_idx)
                self.images_list.addItem(frame_name)
                self.imagelistWidget.addItem(frame_name)
                self.imgDict[frame_name] = iid
                self.sequential_file_list += [frame_name]
                self.sequential_file_dict[frame_name] = fname
                iid += 1
        else:
            self.data_series.append(img.data[:])
            self.header_series.append(img.header.copy())
            extract_fname = self.extract_fname_from_path(fname)
            self.images_list.addItem(extract_fname)
            self.imagelistWidget.addItem(extract_fname)
            self.imgDict[extract_fname] = iid
            self.sequential_file_list += [extract_fname]
            self.sequential_file_dict[extract_fname] = fname
            iid += 1
    self.statusBar().clearMessage()
    if self.data_series:
        self.select_new_image(None, imgID=0)
    self.tabWidget.setCurrentIndex(0)

def set_counter_format_option(self):
    """Ask the user for the multi-file counter format (e.g. '%04d')."""
    dial = CounterFormatOptionDialog(self.counter_format)
    # FIX: the dialog returns None on cancel/empty input; do not clobber
    # the current format with None in that case.
    result = dial.exec_()
    if result is not None:
        self.counter_format = result

def padd_mar(self, data, format_):
    """Pad (or crop) *data* with zeros to the fixed mar345 plate size:
    2300x2300 for '*.mar2300', 3450x3450 otherwise.

    :param data: 2-D array
    :param format_: '*.mar2300' or '*.mar3450'
    :return: (size, size) array with *data* centred
    """
    dim1, dim2 = data.shape
    if format_ == '*.mar2300':
        size = 2300
    else:
        size = 3450
    # margins on each side of each axis; negative margin means cropping
    left = (size - dim1) // 2
    right = size - (dim1 + left)
    up = (size - dim2) // 2
    down = size - (dim2 + up)
    out = numpy.zeros((size, size))
    if left > 0:  # pad
        outlm = left
        inlm = 0
    else:  # crop
        outlm = 0
        inlm = -left
    if right > 0:  # pad
        outrm = -right
        inrm = dim1
    else:  # crop
        outrm = size
        inrm = right
    if up > 0:  # pad
        outum = up
        inum = 0
    else:  # crop
        outum = 0
        inum = -up
    if down > 0:  # pad
        outdm = -down
        indm = dim2
    else:  # crop
        outdm = size
        indm = down
    out[outlm:outrm, outum:outdm] = data[inlm:inrm, inum:indm]
    return out

def sequential_option(self, state):
    """Checkbox slot: toggle sequential file mode (refused while an hdf5
    archive is loaded)."""
    if not self.h5_loaded:
        self.sequential_file_mode = (state == qt.Qt.Checked)
    else:
        # revert the checkbox without re-triggering this slot
        self.filecheckBox.stateChanged.disconnect()
        self.filecheckBox.setCheckState(False)
        self.sequential_file_mode = False
        self.filecheckBox.stateChanged.connect(self.sequential_option)
        message = 'Sequential file mode is not compatible with hdf5 input file: option removed'
        qt.QMessageBox.warning(self, 'Message', message)

def create_main_frame(self):
    """Build the two-tab main widget (View Mode / Batch Mode).
    NOTE(review): this method continues in the next chunk."""
    self.tabWidget = qt.QTabWidget()
    tab1 = qt.QWidget()
    self.tabWidget.addTab(tab1, "View Mode")
    tab2 = qt.QWidget()
    self.tabWidget.addTab(tab2, "Batch Mode")
    # Tab 1
    # Create the mpl Figure and FigCanvas objects. 100 dots-per-inch
    self.dpi = 100
    # self.fig = Figure((100, 100), dpi=self.dpi)
    self.fig = Figure(dpi=self.dpi)
    self.canvas = FigureCanvasQTAgg(self.fig)
    self.canvas.setParent(tab1)
    # Since we have only one plot, we can use add_axes instead of add_subplot,
    # but then the subplot configuration tool in the navigation toolbar
    # wouldn't work.
    self.axes = self.fig.add_subplot(111)
    self.axes.set_visible(False)
    # Bind the 'pick' event for clicking on one of the bars
    self.canvas.mpl_connect('motion_notify_event', self.on_pick)
    # Create the navigation toolbar, tied to the canvas
    self.mpl_toolbar = NavigationToolbar2QT(self.canvas, tab1, coordinates=False)
    # Other GUI controls
    selector_label = qt.QLabel('Active Image:')
    self.images_list = qt.QComboBox(self)
    self.images_list.activated[str].connect(self.select_new_image)
    viewer_label = qt.QLabel("Images Viewer: ", self)
    self.pix_coords_label = qt.QLabel("Pixel coordinates and intensity: x = None , y = None , i = None ", self)
    self.mpl_toolbar.addWidget(self.pix_coords_label)
    self.headerTextEdit = qt.QPlainTextEdit(tab1)
    self.headerTextEdit.setReadOnly(True)
    # Layout with box sizers
    header_vbox = qt.QVBoxLayout()
    header_label = qt.QLabel("Header Info:", self)
    header_vbox.addWidget(header_label)
    header_vbox.addWidget(self.headerTextEdit)
    hbox = qt.QHBoxLayout()
    hbox.addWidget(selector_label, alignment=qt.Qt.AlignRight)
    hbox.addWidget(self.images_list)
    vbox = qt.QVBoxLayout()
    vbox.addWidget(viewer_label, alignment=qt.Qt.AlignVCenter)
    vbox.addWidget(self.canvas, alignment=qt.Qt.AlignVCenter)
    vbox.addWidget(self.mpl_toolbar, alignment=qt.Qt.AlignVCenter)
    vbox.addLayout(hbox)
    left = qt.QWidget()
    right = qt.QWidget()
    left.setLayout(header_vbox)
    right.setLayout(vbox)
    splitter = qt.QSplitter(qt.Qt.Horizontal)
    splitter.addWidget(left)
    splitter.addWidget(right)
    Bighbox = qt.QHBoxLayout()
    Bighbox.addWidget(splitter)
    tab1.setLayout(Bighbox)
    # Tab 2
    imagelistvbox = qt.QVBoxLayout()
    imagelistlabel = qt.QLabel(tab2)
    imagelistlabel.setText("Images List:")
    self.imagelistWidget = qt.QListWidget(tab2)
    import_view_button = qt.QPushButton('Export image to View Mode', tab2)
    import_view_button.clicked.connect(self.batch_to_view)
    imagelistvbox.addWidget(imagelistlabel)
    imagelistvbox.addWidget(self.imagelistWidget)
    imagelistvbox.addWidget(import_view_button)
    rightsidevbox = qt.QVBoxLayout()
    optiongroupBox = qt.QGroupBox(tab2)
    optiongroupBox.setTitle("File Modes:")
    optionbox = qt.QVBoxLayout()
    self.butttonGroup = qt.QButtonGroup()
    self.filecheckBox = qt.QCheckBox()
    self.filecheckBox.setText("Sequential access (for large data series)")
    self.filecheckBox.stateChanged.connect(self.sequential_option)
    self.butttonGroup.addButton(self.filecheckBox)
    self.filecheckBox2 = qt.QCheckBox()
    self.filecheckBox2.setText("Direct access (all images are store in memory simultaneously)")
    self.filecheckBox2.setChecked(True)
    self.butttonGroup.addButton(self.filecheckBox2)
    self.butttonGroup.setExclusive(True)
    optionbox.addWidget(self.filecheckBox)
    optionbox.addWidget(self.filecheckBox2)
    optiongroupBox.setLayout(optionbox)
    rightsidevbox.addWidget(optiongroupBox)
    splitter3 = qt.QSplitter(qt.Qt.Vertical)
    queuegroupBox = qt.QGroupBox(tab2)
    queuegroupBox.setTitle("Transformations Queue:")
    queuebox = qt.QVBoxLayout()
    self.transformation_queue = qt.QListWidget(tab2)
    queuebox.addWidget(self.transformation_queue)
    clear_trans_list_button = qt.QPushButton('Clear Transformation List', tab2)
    clear_trans_list_button.clicked.connect(self.clear_transform_list)
    queuebox.addWidget(clear_trans_list_button)
    queuegroupBox.setLayout(queuebox)
    splitter3.addWidget(queuegroupBox)
    loggroupBox = qt.QGroupBox(tab2)
    loggroupBox.setTitle("Log View:")
    logbox = qt.QHBoxLayout()
    self.log = qt.QPlainTextEdit()
    logbox.addWidget(self.log)
    loggroupBox.setLayout(logbox)
    splitter3.addWidget(loggroupBox)
    splitter3.setStretchFactor(1, 1)
    rightsidevbox.addWidget(splitter3)
    self.progressBar = qt.QProgressBar(tab2)
    self.progressBar.setProperty("value", 0)
    rightsidevbox.addWidget(self.progressBar)
    left2 = qt.QWidget()
    right2 = qt.QWidget()
    left2.setLayout(imagelistvbox)
    right2.setLayout(rightsidevbox)
    splitter2 = qt.QSplitter(qt.Qt.Horizontal)
    splitter2.addWidget(left2)
    splitter2.addWidget(right2)
    splitter2.setStretchFactor(1, 2)
    Bighbox2 = qt.QHBoxLayout()
    Bighbox2.addWidget(splitter2)
# --- continuation of create_main_frame() from the previous chunk ---
    tab2.setLayout(Bighbox2)
    self.setCentralWidget(self.tabWidget)

def create_status_bar(self):
    """Install a permanent label in the status bar and show a greeting."""
    self.status_text = qt.QLabel('')
    self.statusBar().addWidget(self.status_text, 1)
    self.statusBar().showMessage('Thanks for using FabIO viewer.', 5000)

def on_about(self):
    """Show the About box with version/author/license information."""
    msg = [__doc__, "",
           "Version: \t\t%s" % __version__,
           "FabIO version: \t%s" % fabio.version,
           "Author: \t\t%s" % __author__,
           "Copyright: \t\t%s" % __copyright__,
           "License: \t\t%s" % __licence__]
    qt.QMessageBox.about(self, "About FabIO Viewer", os.linesep.join(msg))

def create_menu(self):
    """Build the File / Transform / Options / Help menus."""
    self.file_menu = self.menuBar().addMenu("&File")
    self.open_menu = self.file_menu.addMenu("&Open")
    action = self.create_action("&Image(s)", shortcut="", slot=self.open_data_series,
                                tip="Load single file and data series (files sequence)")
    self.add_actions(self.open_menu, (action,))
    action = self.create_action("&Hdf5 data series", shortcut="", slot=self.open_h5_data_series,
                                tip="Load single file and data series (files sequence)")
    self.add_actions(self.open_menu, (action,))
    self.save_as_menu = self.file_menu.addMenu("&Save")
    action = self.create_action("&Active image", slot=self.save_as, shortcut="",
                                tip="Save/Convert the image which is currently displayed")
    self.add_actions(self.save_as_menu, (action,))
    self.save_data_series_menu = self.save_as_menu.addMenu("&Data series as")
    action = self.create_action("&Multiple files", slot=self.save_data_series_as_multiple_file, shortcut="",
                                tip="Save/Convert the set of images currently loaded into the images list")
    self.add_actions(self.save_data_series_menu, (action,))
    action = self.create_action("&Hdf5 archive", slot=self.save_data_series_as_singlehdf, shortcut="",
                                tip="Save/Convert the set of images currently loaded into the images list")
    self.add_actions(self.save_data_series_menu, (action,))
    action = self.create_action("&Quit", slot=self.close, shortcut="Ctrl+Q",
                                tip="Close the application")
    self.add_actions(self.file_menu, (action,))
    self.transform_menu = self.menuBar().addMenu("&Transform")
    self.mirror_menu = self.transform_menu.addMenu("&Mirror")
    action = self.create_action("&Horizontal", shortcut='', slot=self.horizontal_mirror,
                                tip="Horizontal mirror")
    self.add_actions(self.mirror_menu, (action,))
    action = self.create_action("&Vertical", shortcut='', slot=self.vertical_mirror,
                                tip="Vertical mirror")
    self.add_actions(self.mirror_menu, (action,))
    action = self.create_action("&Transposition", shortcut='', slot=self.transposition,
                                tip="Transposition")
    self.add_actions(self.mirror_menu, (action,))
    self.rotation_menu = self.transform_menu.addMenu("&Rotation")
    action = self.create_action("+90", shortcut='', slot=self.rotation_90,
                                tip="Rotation of +90 degrees (counter-clockwise)")
    self.add_actions(self.rotation_menu, (action,))
    action = self.create_action("+180", shortcut='', slot=self.rotation_180,
                                tip="Rotation of +180 degrees (counter-clockwise)")
    self.add_actions(self.rotation_menu, (action,))
    action = self.create_action("- 90", shortcut='', slot=self.rotation_270,
                                tip="Rotation of -90 degrees (counter-clockwise)")
    self.add_actions(self.rotation_menu, (action,))
    action = self.create_action("&Mask", shortcut='', slot=self.mask,
                                tip="Import a mask from file and apply it to image(s)")
    self.add_actions(self.transform_menu, (action,))
    action = self.create_action("&Downsample", shortcut='', slot=self.downsample,
                                tip="Summation over groups of images")
    self.add_actions(self.transform_menu, (action,))
    tip = "Define if transformations are applied to the whole data series (checked)"\
          " or only to the active image (unchecked) "
    action = self.create_action("&Apply transform to the whole data series", shortcut='',
                                slot=self.transformation_options, tip=tip)
    action.setCheckable(True)
    self.add_actions(self.transform_menu, (action,))
    # kept so transformation_options() can query the checked state later
    self.transform_option_action = action
    self.options = self.menuBar().addMenu("&Options")
    action = self.create_action("&Counter format", shortcut='', slot=self.set_counter_format_option,
                                tip='Allow to define the format for the counter in multiple saving')
    self.add_actions(self.options, (action,))
    self.help_menu = self.menuBar().addMenu("&Help")
    action = self.create_action("&About", shortcut='F1', slot=self.on_about,
                                tip='About Images Converter')
    self.add_actions(self.help_menu, (action,))

def add_actions(self, target, actions):
    """Append *actions* to menu *target*; a None entry inserts a separator."""
    for action in actions:
        if action is None:
            target.addSeparator()
        else:
            target.addAction(action)

def create_action(self, text, slot=None, shortcut=None, icon=None,
                  tip=None, checkable=False, signal="triggered"):
    """Build a QAction with optional icon, shortcut, tooltip and slot
    connected to the signal named by *signal*."""
    action = qt.QAction(text, self)
    if icon is not None:
        action.setIcon(qt.QIcon(":/%s.png" % icon))
    if shortcut is not None:
        action.setShortcut(shortcut)
    if tip is not None:
        action.setToolTip(tip)
        action.setStatusTip(tip)
    if slot is not None:
        getattr(action, signal).connect(slot)
    if checkable:
        action.setCheckable(True)
    return action

class CounterFormatOptionDialog(qt.QDialog):
    # options must reflect the state of the underlying layers
    """Dialog containing entry for down sampling"""
    def __init__(self, counter_format, parent=None):
        qt.QDialog.__init__(self, parent)
        self.resize(350, 100)
        self.setWindowTitle('Options')
        # current counter format (e.g. '%04d'), shown as the initial text
        self.counter_format = counter_format
        buttonBox = qt.QDialogButtonBox(self)
        buttonBox.setGeometry(qt.QRect(0, 60, 341, 32))
        buttonBox.setOrientation(qt.Qt.Horizontal)
        buttonBox.setStandardButtons(qt.QDialogButtonBox.Cancel | qt.QDialogButtonBox.Ok)
        label = qt.QLabel(self)
        label.setGeometry(qt.QRect(38, 23, 181, 16))
        label.setText("File Counter format:")
        self.lineEdit = qt.QLineEdit(self)
        self.lineEdit.setGeometry(qt.QRect(175, 18, 113, 25))
        self.lineEdit.setText(counter_format)
        buttonBox.accepted.connect(self.accept)
        buttonBox.rejected.connect(self.reject)

    def exec_(self):
        """Run the dialog; return the entered format string, or None when
        cancelled or left empty (after warning)."""
        if qt.QDialog.exec_(self) == qt.QDialog.Accepted:
            if str(self.lineEdit.text()) != '':
                return str(self.lineEdit.text())
            else:
                message = "All informations are mandatory, please fill the blanks"
                qt.QMessageBox.warning(self, 'Warning', message)
        else:
            return
self.counter_format class DownSamplingDialog(qt.QDialog): """Dialog containing entry for down sampling""" def __init__(self, parent=None): qt.QDialog.__init__(self, parent) self.resize(407, 250) self.setWindowTitle('Downsampling') buttonBox = qt.QDialogButtonBox(self) buttonBox.setGeometry(qt.QRect(45, 200, 341, 32)) buttonBox.setOrientation(qt.Qt.Horizontal) buttonBox.setStandardButtons(qt.QDialogButtonBox.Cancel | qt.QDialogButtonBox.Ok) label = qt.QLabel(self) label.setGeometry(qt.QRect(38, 63, 181, 16)) label.setText("Number of files to sum up:") self.lineEdit = qt.QLineEdit(self) self.lineEdit.setGeometry(qt.QRect(220, 58, 113, 25)) label2 = qt.QLabel(self) label2.setGeometry(qt.QRect(90, 100, 131, 20)) label2.setText("Starting Phi angle:") self.lineEdit2 = qt.QLineEdit(self) self.lineEdit2.setGeometry(qt.QRect(220, 95, 113, 25)) label3 = qt.QLabel(self) label3.setGeometry(qt.QRect(151, 133, 101, 16)) label3.setText("Phi step:") self.lineEdit3 = qt.QLineEdit(self) self.lineEdit3.setGeometry(qt.QRect(219, 130, 113, 25)) buttonBox.accepted.connect(self.accept) buttonBox.rejected.connect(self.reject) def exec_(self): if qt.QDialog.exec_(self) == qt.QDialog.Accepted: if str(self.lineEdit.text()) != '' and str(self.lineEdit2.text()) != '' and str(self.lineEdit3.text()) != '': return int(str(self.lineEdit.text())), float(str(self.lineEdit2.text())), float(str(self.lineEdit3.text())) else: message = "All informations are mandatory, please fill the blanks" qt.QMessageBox.warning(self, 'Warning', message) else: return None, None, None class BinDialog(qt.QDialog): """Dialog containing entry for binary data block opening""" def __init__(self, parent=None): qt.QDialog.__init__(self, parent) self.resize(410, 270) self.setWindowTitle("Binary data block opening") self.dim1 = None self.dim2 = None self.offset = None self.bytecode = None self.endian = None buttonBox = qt.QDialogButtonBox(self) buttonBox.setGeometry(qt.QRect(50, 230, 341, 32)) 
buttonBox.setOrientation(qt.Qt.Horizontal) buttonBox.setStandardButtons(qt.QDialogButtonBox.Cancel | qt.QDialogButtonBox.Ok) groupBox = qt.QGroupBox(self) groupBox.setGeometry(qt.QRect(10, 10, 370, 191)) groupBox.setTitle("Binary data block required informations:") label = qt.QLabel(self) label.setGeometry(qt.QRect(67, 48, 91, 16)) label.setText("Dimention 1:") label_2 = qt.QLabel(self) label_2.setGeometry(qt.QRect(66, 76, 91, 16)) label_2.setText("Dimention 2:") self.lineEdit = qt.QLineEdit(self) self.lineEdit.setGeometry(qt.QRect(185, 40, 91, 25)) self.lineEdit_2 = qt.QLineEdit(self) self.lineEdit_2.setGeometry(qt.QRect(185, 70, 91, 25)) label_5 = qt.QLabel(self) label_5.setGeometry(qt.QRect(105, 106, 51, 16)) label_5.setText("Offset:") self.lineEdit_3 = qt.QLineEdit(self) self.lineEdit_3.setGeometry(qt.QRect(184, 100, 91, 25)) self.lineEdit_3.setText('0') label_3 = qt.QLabel(groupBox) label_3.setGeometry(qt.QRect(70, 130, 91, 16)) label_3.setText("ByteCode:") self.comboBox = qt.QComboBox(groupBox) self.comboBox.setGeometry(qt.QRect(173, 123, 91, 25)) bytecodes = ["int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64", "float32", "float64"] for bytecode in bytecodes: self.comboBox.addItem(bytecode) self.comboBox.setCurrentIndex(2) label_4 = qt.QLabel(self) label_4.setGeometry(qt.QRect(98, 170, 61, 16)) label_4.setText("Endian:") self.comboBox_2 = qt.QComboBox(self) self.comboBox_2.setGeometry(qt.QRect(182, 166, 91, 25)) self.comboBox_2.addItem("Short") self.comboBox_2.addItem("Long") buttonBox.rejected.connect(self.cancel) buttonBox.accepted.connect(self.binary_block_info) def binary_block_info(self): if str(self.lineEdit.text()) != '' and str(self.lineEdit_2.text()) != '' and str(self.lineEdit_3.text()) != '': self.dim1 = int(str(self.lineEdit.text())) self.dim2 = int(str(self.lineEdit_2.text())) self.offset = int(str(self.lineEdit_3.text())) else: message = "All informations are mandatory, please fill the blanks" qt.QMessageBox.warning(self, 
'Warning', message) return self.bytecode = str(self.comboBox.currentText()) self.endian = str(self.comboBox_2.currentText()) self.accept() def cancel(self): self.close() def exec_(self): if qt.QDialog.exec_(self) == qt.QDialog.Accepted: return self.dim1, self.dim2, self.offset, self.bytecode, self.endian else: return None, None, None, None, None def main(): parser = ArgumentParser(prog="fabio_viewer", usage="fabio_viewer img1 img2... imgn", description=__doc__, epilog="Based on FabIO version %s" % fabio.version) parser.add_argument("images", nargs="*") parser.add_argument("-V", "--version", action='version', version=__version__, help="Print version & quit") args = parser.parse_args() qt.QApplication.setStyle(qt.QStyleFactory.create("Cleanlooks")) app = qt.QApplication([]) form = AppForm() if args.images: form.open_data_series(args.images) form.show() return app.exec_() if __name__ == "__main__": result = main() sys.exit(result) fabio-0.6.0/fabio/app/_qt.py0000644001611600070440000001251413227357030016712 0ustar kiefferscisoft00000000000000# coding: utf-8 # /*########################################################################## # # Copyright (c) 2004-2017 European Synchrotron Radiation Facility # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # ###########################################################################*/ """Common wrapper over Python Qt bindings: - `PyQt5 `_, - `PyQt4 `_ or - `PySide `_. If a Qt binding is already loaded, it will use it, otherwise the different Qt bindings are tried in this order: PyQt4, PySide, PyQt5. The name of the loaded Qt binding is stored in the BINDING variable. For an alternative solution providing a structured namespace, see `qtpy `_ which provides the namespace of PyQt5 over PyQt4 and PySide. """ __authors__ = ["V.A. Sole - ESRF Data Analysis"] __license__ = "MIT" __date__ = "07/09/2017" import logging import sys import traceback _logger = logging.getLogger(__name__) BINDING = None """The name of the Qt binding in use: 'PyQt5', 'PyQt4' or 'PySide'.""" QtBinding = None # noqa """The Qt binding module in use: PyQt5, PyQt4 or PySide.""" HAS_SVG = False """True if Qt provides support for Scalable Vector Graphics (QtSVG).""" HAS_OPENGL = False """True if Qt provides support for OpenGL (QtOpenGL).""" # First check for an already loaded wrapper if 'PySide.QtCore' in sys.modules: BINDING = 'PySide' elif 'PyQt5.QtCore' in sys.modules: BINDING = 'PyQt5' elif 'PyQt4.QtCore' in sys.modules: BINDING = 'PyQt4' else: # Then try Qt bindings try: import PyQt4 # noqa except ImportError: try: import PySide # noqa except ImportError: try: import PyQt5 # noqa except ImportError: raise ImportError( 'No Qt wrapper found. 
Install PyQt4, PyQt5 or PySide.') else: BINDING = 'PyQt5' else: BINDING = 'PySide' else: BINDING = 'PyQt4' if BINDING == 'PyQt4': _logger.debug('Using PyQt4 bindings') if sys.version < "3.0.0": try: import sip sip.setapi("QString", 2) sip.setapi("QVariant", 2) except: _logger.warning("Cannot set sip API") import PyQt4 as QtBinding # noqa from PyQt4.QtCore import * # noqa from PyQt4.QtGui import * # noqa try: from PyQt4.QtOpenGL import * # noqa except ImportError: _logger.info("PyQt4.QtOpenGL not available") HAS_OPENGL = False else: HAS_OPENGL = True try: from PyQt4.QtSvg import * # noqa except ImportError: _logger.info("PyQt4.QtSvg not available") HAS_SVG = False else: HAS_SVG = True from PyQt4.uic import loadUi # noqa Signal = pyqtSignal Property = pyqtProperty Slot = pyqtSlot elif BINDING == 'PySide': _logger.debug('Using PySide bindings') import PySide as QtBinding # noqa from PySide.QtCore import * # noqa from PySide.QtGui import * # noqa try: from PySide.QtOpenGL import * # noqa except ImportError: _logger.info("PySide.QtOpenGL not available") HAS_OPENGL = False else: HAS_OPENGL = True try: from PySide.QtSvg import * # noqa except ImportError: _logger.info("PySide.QtSvg not available") HAS_SVG = False else: HAS_SVG = True pyqtSignal = Signal elif BINDING == 'PyQt5': _logger.debug('Using PyQt5 bindings') import PyQt5 as QtBinding # noqa from PyQt5.QtCore import * # noqa from PyQt5.QtGui import * # noqa from PyQt5.QtWidgets import * # noqa from PyQt5.QtPrintSupport import * # noqa try: from PyQt5.QtOpenGL import * # noqa except ImportError: _logger.info("PySide.QtOpenGL not available") HAS_OPENGL = False else: HAS_OPENGL = True try: from PyQt5.QtSvg import * # noqa except ImportError: _logger.info("PyQt5.QtSvg not available") HAS_SVG = False else: HAS_SVG = True from PyQt5.uic import loadUi # noqa Signal = pyqtSignal Property = pyqtProperty Slot = pyqtSlot else: raise ImportError('No Qt wrapper found. 
Install PyQt4, PyQt5 or PySide') fabio-0.6.0/fabio/app/setup.py0000644001611600070440000000320113227357030017260 0ustar kiefferscisoft00000000000000# coding: utf-8 # /*########################################################################## # Copyright (C) 2016 European Synchrotron Radiation Facility # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # ############################################################################*/ __authors__ = ["V. 
Valls"] __license__ = "MIT" __date__ = "18/05/2017" from numpy.distutils.misc_util import Configuration def configuration(parent_package='', top_path=None): config = Configuration('app', parent_package, top_path) return config if __name__ == "__main__": from numpy.distutils.core import setup setup(configuration=configuration) fabio-0.6.0/fabio/app/_matplotlib.py0000644001611600070440000000503013227357030020430 0ustar kiefferscisoft00000000000000# coding: utf-8 # /*########################################################################## # # Copyright (c) 2016-2017 European Synchrotron Radiation Facility # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # ###########################################################################*/ """This module inits matplotlib and setups the backend to use. It MUST be imported prior to any other import of matplotlib. It provides the matplotlib :class:`FigureCanvasQTAgg` class corresponding to the used backend. """ __authors__ = ["T. 
Vincent"] __license__ = "MIT" __date__ = "08/01/2018" import sys import logging _logger = logging.getLogger(__name__) if 'matplotlib' in sys.modules: _logger.warning( 'matplotlib already loaded, setting its backend may not work') from . import _qt as qt import matplotlib if qt.BINDING == 'PySide': matplotlib.rcParams['backend'] = 'Qt4Agg' matplotlib.rcParams['backend.qt4'] = 'PySide' from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg # noqa from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT # noqa elif qt.BINDING == 'PyQt4': matplotlib.rcParams['backend'] = 'Qt4Agg' from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg # noqa from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT # noqa elif qt.BINDING == 'PyQt5': matplotlib.rcParams['backend'] = 'Qt5Agg' from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg # noqa from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT # noqa fabio-0.6.0/fabio/app/convert.py0000644001611600070440000003426013227357030017611 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. """Portable image converter based on FabIO library. """ from __future__ import with_statement, print_function __author__ = "Valentin Valls" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" __licence__ = "MIT" __date__ = "31/07/2017" __status__ = "production" import logging logging.basicConfig() import sys import os import glob import fabio from fabio.third_party import six from fabio.third_party import argparse logger = logging.getLogger("fabio-convert") def get_default_extension_from_format(format_name): """" Get a default file extension from a fabio format :param str format: String format like "edfimage" :rtype: str """ class_ = fabio.fabioformats.get_class_by_name(format_name) if class_ is None: raise RuntimeError("Format '%s' unsupported" % format_name) extensions = class_.DEFAULT_EXTENSIONS if len(extensions) == 0: # No extensions return "" else: return extensions[0] def get_output_filename(input_filename, format_name): """ Returns the output filename from the input filename and the format. :param str input_filename: Input filename path :param str format_name: String format like "edfimage" :rtype: str """ basename, _ = os.path.splitext(input_filename) extension = get_default_extension_from_format(format_name) if extension == "": extension = "bin" return basename + "." + extension def is_user_want_to_overwrite_filename(filename): """ Ask question in the shell and returns true if the user want to overwrite a file passed in parameter. 
:param str filename: The filename it asks for :rtype: bool """ while True: question = "Do you want to overwrite the file '%s' (y/n): " % filename answer = six.moves.input(question) answer = answer.strip().lower() if answer in ["y", "yes", "n", "no"]: break return answer in ["y", "yes"] def is_older(filename1, filename2): """Returns true if the first file is older than the second one. :param str filename1: An existing filename :param str filename2: An existing filename :rtype: bool """ time1 = os.path.getmtime(filename1) time2 = os.path.getmtime(filename2) return time1 > time2 def convert_one(input_filename, output_filename, options): """ Convert a single file using options :param str input_filename: The input filename :param str output_filename: The output filename :param object options: List of options provided from the command line :rtype: bool :returns: True is the conversion succeeded """ input_filename = os.path.abspath(input_filename) input_exists = os.path.exists(input_filename) output_filename = os.path.abspath(output_filename) output_exists = os.path.exists(output_filename) if options.verbose: print("Converting file '%s' to '%s'" % (input_filename, output_filename)) if not input_exists: logger.error("Input file '%s' do not exists. 
Conversion skipped.", input_filename) return False skip_conversion = False remove_file = False if output_exists: if options.interactive: if is_user_want_to_overwrite_filename(output_filename): remove_file = True else: skip_conversion = True elif options.no_clobber: skip_conversion = True elif options.force or options.remove_destination: remove_file = True elif options.update: if is_older(output_filename, input_filename): skip_conversion = True else: remove_file = True elif is_user_want_to_overwrite_filename(output_filename): remove_file = True else: skip_conversion = True if remove_file: if options.verbose: print("Overwrite file %s" % output_filename) try: if not options.dry_run: os.remove(output_filename) except OSError as e: logger.error("Removing previous file %s failed cause: \"%s\". Conversion skipped.", e.message, output_filename) logger.debug("Backtrace", exc_info=True) return False if skip_conversion: if options.verbose: print("Conversion to file %s skipped" % output_filename) return True try: logger.debug("Load '%s'", input_filename) source = fabio.open(input_filename) except KeyboardInterrupt: raise except Exception as e: logger.error("Loading input file '%s' failed cause: \"%s\". Conversion skipped.", input_filename, e.message) logger.debug("Backtrace", exc_info=True) return False try: logger.debug("Convert '%s' into '%s'", input_filename, options.format) converted = source.convert(options.format) except KeyboardInterrupt: raise except Exception as e: logger.error("Converting input file '%s' failed cause: \"%s\". Conversion skipped.", input_filename, e.message) logger.debug("Backtrace", exc_info=True) return False try: logger.debug("Write '%s'", output_filename) if not options.dry_run: converted.write(output_filename) except KeyboardInterrupt: raise except Exception as e: logger.error("Saving output file '%s' failed cause: \"%s\". 
Conversion skipped.", output_filename, e.message) logger.debug("Backtrace", exc_info=True) return False # a success return True def convert_all(options): """Convert all the files from the command line. :param object options: List of options provided from the command line :rtype: bool :returns: True is the conversion succeeded """ succeeded = True for filename in options.images: if options.output is None: output_filename = get_output_filename(filename, options.format) elif os.path.isdir(options.output): output_filename = get_output_filename(filename, options.format) output_filename = os.path.basename(output_filename) directory = os.path.abspath(options.output) output_filename = os.path.join(directory, output_filename) else: output_filename = options.output succeeded = succeeded and convert_one(filename, output_filename, options) return succeeded def print_supported_formats(): """List supported format to the output""" classes = fabio.fabioformats.get_classes(writer=True) classes.sort(key=lambda c: c.__module__.lower()) indentation = " " print("List of writable file formats supported by FabIO version %s" % fabio.version) print() for class_ in classes: if len(class_.DEFAULT_EXTENSIONS) > 0: extensions = ", ".join(["*." + x for x in class_.DEFAULT_EXTENSIONS]) extensions = "(%s)" % extensions else: extensions = "" print("- %s %s" % (class_.codec_name(), extensions)) print("%s%s" % (indentation, class_.DESCRIPTION)) def is_format_supported(format_name): """ Returns true if the file format is supported. :param str format_name: Name of the format (for example edfimage) :rtype: bool """ try: fabio.fabioimage.FabioImage.factory(format_name) return True except RuntimeError: logger.debug("Backtrace", exc_info=True) return False def expand_args(args): """ Takes an argv and expand it (under Windows, cmd does not convert *.tif into a list of files. 
:param list args: list of files or wildcards :return: list of actual args """ new = [] for afile in args: if glob.has_magic(afile): new += glob.glob(afile) else: new.append(afile) return new EXIT_SUCCESS = 0 EXIT_FAILURE = 1 EXIT_ARGUMENT_FAILURE = 2 def main(): epilog = """return codes: 0 means a success. 1 means the conversion contains a failure, 2 means there was an error in the arguments""" parser = argparse.ArgumentParser(prog="fabio-convert", description=__doc__, epilog=epilog) parser.add_argument("IMAGE", nargs="*", help="Input file images") parser.add_argument("-V", "--version", action='version', version=fabio.version, help="output version and exit") parser.add_argument("-v", "--verbose", action='store_true', dest="verbose", default=False, help="show information for each conversions") parser.add_argument("--debug", action='store_true', dest="debug", default=False, help="show debug information") group = parser.add_argument_group("main arguments") group.add_argument("-l", "--list", action="store_true", dest="list", default=None, help="show the list of available formats and exit") group.add_argument("-o", "--output", dest='output', type=str, help="output file or directory") group.add_argument("-F", "--output-format", dest="format", type=str, default=None, help="output format") group = parser.add_argument_group("optional behaviour arguments") group.add_argument("-f", "--force", dest="force", action="store_true", default=False, help="if an existing destination file cannot be" + " opened, remove it and try again (this option" + " is ignored when the -n option is also used)") group.add_argument("-n", "--no-clobber", dest="no_clobber", action="store_true", default=False, help="do not overwrite an existing file (this option" + " is ignored when the -i option is also used)") group.add_argument("--remove-destination", dest="remove_destination", action="store_true", default=False, help="remove each existing destination file before" + " attempting to open it (contrast 
with --force)") group.add_argument("-u", "--update", dest="update", action="store_true", default=False, help="copy only when the SOURCE file is newer" + " than the destination file or when the" + " destination file is missing") group.add_argument("-i", "--interactive", dest="interactive", action="store_true", default=False, help="prompt before overwrite (overrides a previous -n" + " option)") group.add_argument("--dry-run", dest="dry_run", action="store_true", default=False, help="do everything except modifying the file system") try: args = parser.parse_args() if args.debug: logger.setLevel(logging.DEBUG) if args.list: print_supported_formats() return if len(args.IMAGE) == 0: raise argparse.ArgumentError(None, "No input file specified.") # the upper case IMAGE is used for the --help auto-documentation args.images = expand_args(args.IMAGE) args.images.sort() if args.format is None or not args.format.endswith("image"): if args.format is None: if args.output is None: raise argparse.ArgumentError(None, "No format specified. Use -F or -o.") dummy_filename = args.output else: # format looks to be an extension dummy_filename = "foo." + args.format # extract file format from file name filename = fabio.fabioutils.FilenameObject(filename=dummy_filename) if filename.format is None or len(filename.format) == 0: raise argparse.ArgumentError(None, "This file extension is unknown. You have also to specify a format using -F.") elif filename.format is None or len(filename.format) > 1: formats = [i + "image" for i in filename.format] formats = ', '.join(formats) raise argparse.ArgumentError(None, "This file extension correspond to different file formats: '%s'. You have to specify it using -F." % formats) args.format = filename.format[0] + "image" if not is_format_supported(args.format): raise argparse.ArgumentError(None, "Format '%s' is unknown. Use -l to list all available formats." 
% args.format) except argparse.ArgumentError as e: logger.error(e.message) logger.debug("Backtrace", exc_info=True) return EXIT_ARGUMENT_FAILURE succeeded = convert_all(args) if not succeeded: print("Conversion or part of it failed. You can try with --debug to have more output information.") return EXIT_FAILURE return EXIT_SUCCESS if __name__ == "__main__": result = main() sys.exit(result) fabio-0.6.0/fabio/__init__.py0000644001611600070440000000677113227357030017116 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# """FabIO module""" from __future__ import absolute_import, print_function, division __author__ = "Jérôme Kieffer" __contact__ = "Jerome.Kieffer@ESRF.eu" __license__ = "GPLv3+" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" __date__ = "16/01/2018" __status__ = "stable" import sys import logging if "ps1" in dir(sys): # configure logging with interactive console logging.basicConfig() import os project = os.path.basename(os.path.dirname(os.path.abspath(__file__))) try: from ._version import __date__ as date # noqa from ._version import version, version_info, hexversion, strictversion # noqa except ImportError: raise RuntimeError("Do NOT use %s from its sources: build it and use the built version" % project) from . import fabioimage factory = fabioimage.FabioImage.factory from . import openimage from .fabioutils import COMPRESSORS, jump_filename, FilenameObject, \ previous_filename, next_filename, deconstruct_filename, \ extract_filenumber, getnum, construct_filename, exists # Compatibility with outside world: filename_object = FilenameObject from .openimage import openimage as open from .openimage import openheader as openheader def register(codec_class): """ Register a codec class with the set of formats supported by fabio. It is a transitional function to prepare the next comming version of fabio. - On the current fabio library, when a module is imported, all the formats inheriting FabioImage are automatically registred. And this function is doing nothing. - On the next fabio library. Importing a module containing classes inheriting FabioImage will not be registered. And this function will register the class. The following source code will then provide the same behaviour on both fabio versions, and it is recommended to use it. .. code-block:: python @fabio.register class MyCodec(fabio.fabioimage.FabioImage): pass """ assert(issubclass(codec_class, fabioimage.FabioImage)) return codec_class def tests(): """ Run the FabIO test suite. 
def benchmarks():
    """Execute the FabIO benchmark suite.

    :return: the result object produced by the benchmark runner
    """
    from . import benchmark
    return benchmark.run()
# """FabIO reader for Fit2D binary images TODO: handle big-endian files """ # Get ready for python3: from __future__ import with_statement, print_function, division __authors__ = ["Jérôme Kieffer"] __contact__ = "jerome.kiefer@esrf.fr" __license__ = "MIT" __copyright__ = "2016-2016 European Synchrotron Radiation Facility" __date__ = "11/08/2017" import logging logger = logging.getLogger(__name__) import numpy from .fabioimage import FabioImage, OrderedDict def hex_to(stg, type_="int"): """convert a 8-byte-long string (bytes) into an int or a float :param stg: bytes string :param str type_: "int" or "float" """ value = int(stg, 16) if type_ == "float": value = numpy.array([int("38d1b717", 16)], "int32").view("float32")[0] return value class Fit2dImage(FabioImage): """ FabIO image class for Images for XXX detector """ DESCRIPTION = "Fit2d file format" DEFAULT_EXTENSIONS = ["f2d"] BUFFER_SIZE = 512 # size of the buffer PIXELS_PER_CHUNK = 128 ENC = "ascii" def __init__(self, *arg, **kwargs): """ Generic constructor """ FabioImage.__init__(self, *arg, **kwargs) self.num_block = None def _readheader(self, infile): """ Read and decode the header of an image: :param infile: Opened python file (can be stringIO or bipped file) """ # list of header key to keep the order (when writing) header = OrderedDict() self.header = self.check_header() while True: line = infile.read(self.BUFFER_SIZE) if len(line) < self.BUFFER_SIZE: break if line[0:1] != b"\\": for block_read in range(2, 16): line = infile.read(self.BUFFER_SIZE) if line[0:1] == b"\\": self.BUFFER_SIZE *= block_read logger.warning("Increase block size to %s ", self.BUFFER_SIZE) infile.seek(0) break else: err = "issue while reading header, expected '\', got %s" % line[0] logger.error(err) raise RuntimeError(err) key, line = line.split(b":", 1) num_block = hex_to(line[:8]) # metadatatype = chr(line[8]) if six.PY3 else line[8].decode(self.ENC) metadatatype = line[8:9].decode(self.ENC) key = key[1:].decode(self.ENC) if 
metadatatype == "s": len_value = hex_to(line[9:17]) header[key] = line[17:17 + len_value].decode(self.ENC) elif metadatatype == "r": header[key] = hex_to(line[9:17], "float") elif metadatatype == "i": header[key] = hex_to(line[9:17]) elif metadatatype == "a" and num_block != 0: # "a" self.num_block = num_block array_type = line[9:10].decode(self.ENC) dim1 = hex_to(line[26:34]) dim2 = hex_to(line[34:42]) if array_type == "i": bytecode = "int32" bpp = 4 elif array_type == "r": bytecode = "float32" bpp = 4 elif array_type == "l": bytecode = "int8" bpp = 1 raw = infile.read(self.num_block * self.BUFFER_SIZE) # Fit2d stores 31 pixels per int32 i32 = numpy.fromstring(raw, "int32") if numpy.little_endian: # lets's work in big-endian for the moment i32.byteswap(True) r32 = numpy.unpackbits(i32.view("uint8")).reshape((-1, 32)) # Remove the sign bit which is the first in big-endian # all pixels are in reverse order in the group of 31 r31 = r32[:, -1:0:-1] mask = r31.ravel()[:dim1 * dim2].reshape((dim2, dim1)) header[key] = mask continue else: err = "unsupported data type: %s" % array_type logger.error(err) raise RuntimeError(err) raw = infile.read(self.num_block * self.BUFFER_SIZE) decoded = numpy.fromstring(raw, bytecode).reshape((-1, self.BUFFER_SIZE // bpp)) # There is a bug in this format: throw away 3/4 of the read data: decoded = decoded[:, :self.PIXELS_PER_CHUNK].ravel() header[key] = decoded[:dim1 * dim2].reshape(dim2, dim1) self.header = header def read(self, fname, frame=None): """try to read image :param fname: name of the file """ self.resetvals() with self._open(fname) as infile: self._readheader(infile) self.data = self.header.pop("data_array") return self # this is not compatibility with old code: fit2dimage = Fit2dImage fabio-0.6.0/fabio/mar345image.py0000644001611600070440000004146013227357030017367 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron 
Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE """ Authors: ........ * Henning O. Sorensen & Erik Knudsen: Center for Fundamental Research: Metal Structures in Four Dimensions; Risoe National Laboratory; Frederiksborgvej 399; DK-4000 Roskilde; email:erik.knudsen@risoe.dk * Jon Wright, Jérôme Kieffer & Gaël Goret: European Synchrotron Radiation Facility; Grenoble (France) Supports Mar345 imaging plate and Mar555 flat panel Documentation on the format is available from: http://rayonix.com/site_media/downloads/mar345_formats.pdf """ # Get ready for python3: from __future__ import with_statement, print_function, absolute_import __authors__ = ["Henning O. 
Sorensen", "Erik Knudsen", "Jon Wright", "Jérôme Kieffer"] __date__ = "27/07/2017" __status__ = "production" __copyright__ = "2007-2009 Risoe National Laboratory; 2010-2016 ESRF" __licence__ = "MIT" import struct import time import sys import logging import numpy from .fabioimage import FabioImage logger = logging.getLogger(__name__) from .compression import compPCK, decPCK class Mar345Image(FabioImage): _need_a_real_file = True DESCRIPTION = "File format from Mar345 imaging plate and Mar555 flat panel" DEFAULT_EXTENSIONS = ["mar2300"] def __init__(self, *args, **kwargs): FabioImage.__init__(self, *args, **kwargs) self.numhigh = None self.numpixels = None self.swap_needed = None def read(self, fname, frame=None): """ Read a mar345 image""" self.filename = fname f = self._open(self.filename, "rb") self._readheader(f) if 'compressed' in self.header['Format']: self.data = decPCK(f, self.dim1, self.dim2, self.numhigh, swap_needed=self.swap_needed) else: logger.error("Cannot handle these formats yet due to lack of documentation") return None self.bytecode = numpy.uint32 f.close() return self def _readheader(self, infile=None): """ Read a mar345 image header """ # clip was not used anywhere - commented out # clip = '\x00' # using a couple of local variables inside this function f = infile h = {} # header is 4096 bytes long data = f.read(64) # the contents of the mar345 header is taken to be as # described in # http://www.mar-usa.com/support/downloads/mar345_formats.pdf # the first 64 bytes are 4-byte integers (but in the CBFlib # example image it seems to 128 bytes?) 
# first 4-byte integer is a marker to check endianness if struct.unpack(" 65535)[0] nb_pix = pix_location.size if nb_pix % 8 == 0: tmp = numpy.zeros((nb_pix, 2), dtype="int32") else: tmp = numpy.zeros(((nb_pix // 8 + 1) * 8, 2), dtype="int32") tmp[:nb_pix, 0] = pix_location + 1 tmp[:nb_pix, 1] = flt_data[pix_location] if self.swap_needed: tmp.byteswap(True) return tmp.tostring() def nb_overflow_pixels(self): return (self.data > 65535).sum() @staticmethod def checkData(data=None): if data is None: return None else: # enforce square image shape = data.shape assert len(shape) == 2, "image has 2 dimensions" mshape = max(shape) z = numpy.zeros((mshape, mshape), dtype=int) z[:shape[0], :shape[1]] = data return z mar345image = Mar345Image fabio-0.6.0/fabio/test/0000755001611600070440000000000013227375744015765 5ustar kiefferscisoft00000000000000fabio-0.6.0/fabio/test/testjpeg2kimage.py0000644001611600070440000001166413227357030021420 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# """ Test JPEG 2000 format """ from __future__ import print_function, with_statement, division, absolute_import import sys import unittest import numpy try: from PIL import Image except ImportError: Image = None from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from .. import jpeg2kimage def isPilUsable(): if jpeg2kimage.PIL is None: return False try: jpeg2kimage.PIL.Image.frombytes("1", (2, 2), b"", decoder_name='jpeg2k') except Exception as e: if e.args[0] == "decoder jpeg2k not available": return False return True def isGlymurUsable(): if jpeg2kimage.glymur is None: return False import glymur if glymur.version.openjpeg_version_tuple < [1, 5, 0]: return False return True class TestJpeg2KImage(unittest.TestCase): """Test the class format""" def setUp(self): if not isPilUsable() and not isGlymurUsable(): self.skipTest("PIL nor glymur are available") def loadImage(self, filename): image_format = jpeg2kimage.Jpeg2KImage() image = image_format.read(filename) return image def test_open_uint8(self): filename = "binned_data_uint8.jp2" filename = UtilsTest.getimage(filename + ".bz2")[:-4] image = self.loadImage(filename) self.assertEqual(image.data.shape, (120, 120)) self.assertEqual(image.data.dtype, numpy.uint8) def test_open_uint16(self): filename = "binned_data_uint16.jp2" filename = UtilsTest.getimage(filename + ".bz2")[:-4] image_format = jpeg2kimage.Jpeg2KImage() image = image_format.read(filename) self.assertEqual(image.data.shape, (120, 120)) self.assertEqual(image.data.dtype, numpy.uint16) def test_open_wrong_format(self): filename = "MultiFrame.edf" filename = UtilsTest.getimage(filename + ".bz2")[:-4] image_format = jpeg2kimage.Jpeg2KImage() try: _image = image_format.read(filename) self.fail() except IOError: pass def test_open_missing_file(self): filename = "___missing_file___.___" image_format = jpeg2kimage.Jpeg2KImage() try: _image = image_format.read(filename) self.fail() except IOError: pass class 
def suite():
    """Build the test suite for every JPEG 2000 test case."""
    loader = unittest.defaultTestLoader.loadTestsFromTestCase
    testsuite = unittest.TestSuite()
    for case in (TestJpeg2KImage,
                 TestJpeg2KImage_PIL,
                 TestJpeg2KImage_glymur,
                 TestJpeg2KImage_fabio):
        testsuite.addTest(loader(case))
    return testsuite
class TestGE(unittest.TestCase):
    """Read checks for GE detector images (plain, gz and bz2 variants)."""

    def setUp(self):
        """ download images """
        self.GE = UtilsTest.getimage("GE_aSI_detector_image_1529.bz2")

    def test_read(self):
        # every downloaded variant lives next to the reference image
        folder = os.path.dirname(self.GE)
        for line in TESTIMAGES.split("\n"):
            vals = line.split()
            name = vals[0]
            dim1, dim2 = int(vals[1]), int(vals[2])
            mini, maxi, mean, stddev = [float(x) for x in vals[3:]]
            obj = GEimage()
            obj.read(os.path.join(folder, name))
            # statistics must match the reference table to 4 decimals
            self.assertAlmostEqual(mini, obj.getmin(), 4, "getmin")
            self.assertAlmostEqual(maxi, obj.getmax(), 4, "getmax")
            self.assertAlmostEqual(mean, obj.getmean(), 4, "getmean")
            self.assertAlmostEqual(stddev, obj.getstddev(), 4, "getstddev")
            self.assertEqual(dim1, obj.dim1, "dim1")
            self.assertEqual(dim2, obj.dim2, "dim2")
class TestFlatBinary(unittest.TestCase):
    """Open flat binary files that carry no recognisable header."""

    # dummy file names created in the temporary directory
    filenames = [os.path.join(UtilsTest.tempdir, i)
                 for i in ("not.a.file",
                           "bad_news_1234",
                           "empty_files_suck_1234.edf",
                           "notRUBY_1234.dat")]

    # A 2048 by 2048 blank 16-bit image.  Bug fix: the payload must be bytes
    # (writing str to a file opened in "wb" raises TypeError on Python 3) and
    # the old literal "\0x0" was a 3-character str (NUL, 'x', '0'), not one
    # NUL byte.
    PAYLOAD = b"\x00" * (2048 * 2048 * 2)

    def setUp(self):
        for filename in self.filenames:
            with open(filename, "wb") as f:
                f.write(self.PAYLOAD)

    def test_openimage(self):
        """
        test the opening of "junk" empty images ...
        JK: I wonder if this test makes sense !
        """
        nfail = 0
        for filename in self.filenames:
            try:
                im = fabio.open(filename)
                if im.data.tostring() != self.PAYLOAD:
                    nfail += 1
                else:
                    logger.info("**** Passed: %s" % filename)
            except Exception:
                # keep the best-effort behaviour: count the failure, but do
                # not swallow KeyboardInterrupt/SystemExit like the old bare
                # except did
                logger.warning("failed for: %s" % filename)
                nfail += 1
        self.assertEqual(nfail, 0, " %s failures out of %s" %
                         (nfail, len(self.filenames)))

    def tearDown(self):
        for filename in self.filenames:
            os.remove(filename)
# """ #bruker Unit tests #built on testbrukerimage 19/01/2015 """ from __future__ import print_function, with_statement, division, absolute_import import unittest import os import numpy if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) from ..brukerimage import brukerimage from .. import fabioutils # this is actually a violation of the bruker format since the order of # the header items is specified # in the standard, whereas the order of a python dictionary is not MYHEADER = {"FORMAT": '86', 'NPIXELB': '2', 'VERSION': '9', 'HDRBLKS': '5', 'NOVERFL': '4', 'NCOLS': '256', 'NROWS': '256', 'WORDORD': '0'} MYIMAGE = numpy.ones((256, 256), numpy.uint16) * 16 MYIMAGE[0, 0] = 0 MYIMAGE[1, 1] = 32 MYIMAGE[127:129, 127:129] = 65535 if not numpy.little_endian: MYIMAGE.byteswap(True) OVERFLOWS = [ ["%09d" % 4194304, ("%07d" % (127 * 256 + 127))], ["%09d" % 4194304, ("%07d" % (127 * 256 + 128))], ["%09d" % 4194304, ("%07d" % (128 * 256 + 127))], ["%09d" % 4194304, ("%07d" % (128 * 256 + 128))] ] class TestBruker(unittest.TestCase): """basic test""" def setUp(self): """ Generate a test bruker image """ self.filename = os.path.join(UtilsTest.tempdir, "image.0000") with open(self.filename, 'wb') as fout: wrb = 0 for key, val in MYHEADER.items(): fout.write((("%-7s" % key) + ':' + ("%-72s" % val)).encode("ASCII")) wrb = wrb + 80 hdrblks = int(MYHEADER['HDRBLKS']) while (wrb < hdrblks * 512): fout.write(b"\x1a\x04") fout.write(b'.' * 78) wrb = wrb + 80 fout.write(MYIMAGE.tostring()) noverfl = int(MYHEADER['NOVERFL']) for ovf in OVERFLOWS: fout.write((ovf[0] + ovf[1]).encode("ASCII")) fout.write(b'.' 
class TestBrukerLinear(unittest.TestCase):
    """Round-trip a random float32 array through the bruker writer/reader."""

    def setUp(self):
        unittest.TestCase.setUp(self)
        self.filename = os.path.join(UtilsTest.tempdir, "bruker.0000")
        self.data = numpy.random.random((500, 550)).astype("float32")

    def test_linear(self):
        """ test for self consistency of random data read/write """
        written = brukerimage(data=self.data)
        written.write(self.filename)
        reread = brukerimage()
        reread.read(self.filename)
        # the worst per-pixel deviation must stay below float32 precision
        worst = abs(reread.data - self.data).max()
        self.assertTrue(worst < numpy.finfo(numpy.float32).eps,
                        "Error is %s>1e-7" % worst)

    def tearDown(self):
        unittest.TestCase.tearDown(self)
        if os.path.exists(self.filename):
            os.unlink(self.filename)
class TestRealImg(unittest.TestCase):
    """ check some read data from bruker detector"""

    def setUp(self):
        """ download images """
        # keep only the directory: TESTIMAGES names are resolved against it
        self.im_dir = os.path.dirname(UtilsTest.getimage("Cr8F8140k103.0026.bz2"))

    def tearDown(self):
        unittest.TestCase.tearDown(self)
        self.im_dir = None

    def test_read(self):
        """ check we can read bruker images"""
        # each TESTIMAGES line: filename dim1 dim2 min max mean stddev
        for line in TESTIMAGES.split("\n"):
            vals = line.split()
            name = vals[0]
            dim1, dim2 = [int(x) for x in vals[1:3]]
            mini, maxi, mean, stddev = [float(x) for x in vals[3:]]
            obj = brukerimage()
            obj.read(os.path.join(self.im_dir, name))
            # statistics must match the reference table to 2 decimals
            self.assertAlmostEqual(mini, obj.getmin(), 2, "getmin")
            self.assertAlmostEqual(maxi, obj.getmax(), 2, "getmax")
            self.assertAlmostEqual(mean, obj.getmean(), 2, "getmean")
            self.assertAlmostEqual(stddev, obj.getstddev(), 2, "getstddev")
            self.assertEqual(dim1, obj.dim1, "dim1")
            self.assertEqual(dim2, obj.dim2, "dim2")

    def test_write(self):
        "Test writing with self consistency at the fabio level"
        for line in TESTIMAGES.split("\n"):
            vals = line.split()
            name = vals[0]
            obj = brukerimage()
            ref = brukerimage()
            fname = os.path.join(self.im_dir, name)
            # read the reference image, write a copy, then re-read the copy
            obj.read(fname)
            obj.write(os.path.join(UtilsTest.tempdir, name))
            other = brukerimage()
            other.read(os.path.join(UtilsTest.tempdir, name))
            ref.read(fname)
            # pixel data must survive the write/read round-trip unchanged
            self.assertEqual(abs(obj.data - other.data).max(), 0, "data are the same")
            for key in ref.header:
                # "filename" legitimately differs between original and copy
                if key in ("filename",):
                    continue
                if key not in other.header:
                    # missing keys are tolerated (logged), mismatching ones are not
                    logger.warning("Key %s is missing in new header, was %s" % (key, ref.header[key]))
                else:
                    self.assertEqual(ref.header[key], other.header[key],
                                     "value are the same for key %s: was %s now %s" % (key, ref.header[key], other.header[key]))
# Copyright (C) 2016 University Köln, Germany
# # Get ready for python3: from __future__ import with_statement, print_function, division __authors__ = ["Clemens Prescher"] __contact__ = "c.prescher@uni-koeln.de" __license__ = "MIT" __copyright__ = "Clemens Prescher/Univeristy Köln, Germany" __date__ = "27/07/2017" import unittest import sys import numpy from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from fabio.speimage import SpeImage class TestSpeImage(unittest.TestCase): @classmethod def setUpClass(cls): super(TestSpeImage, cls).setUpClass() cls.v2_spe_filename = UtilsTest.getimage('v2.SPE.bz2')[:-4] cls.v2_converted_spe_filename = UtilsTest.getimage('v2_converted.SPE.bz2')[:-4] cls.v3_spe_filename = UtilsTest.getimage('v3.spe.bz2')[:-4] cls.v3_custom_roi_filename = UtilsTest.getimage('v3_custom_roi.spe.bz2')[:-4] cls.v3_2frames_filename = UtilsTest.getimage('v3_2frames.spe.bz2')[:-4] @classmethod def tearDownClass(cls): super(TestSpeImage, cls).tearDownClass() def setUp(self): self.v2_spe_file = SpeImage() self.v2_spe_file.read(self.v2_spe_filename) self.v3_spe_file = SpeImage() self.v3_spe_file.read(self.v3_spe_filename) self.v2_converted_spe_file = SpeImage() self.v2_converted_spe_file.read(self.v2_converted_spe_filename) def tearDown(self): unittest.TestCase.tearDown(self) # free the associated memory self.v2_spe_file = self.v3_spe_file = self.v2_converted_spe_file = None def test_reading_version2_spe(self): self.assertEqual(self.v2_spe_file.header['version'], 2) self.assertEqual(self.v3_spe_file.header['version'], 3) self.assertEqual(self.v2_converted_spe_file.header['version'], 3) def test_calibration(self): self.assertGreater(len(self.v2_spe_file.header['x_calibration']), 0) self.assertGreater(len(self.v3_spe_file.header['x_calibration']), 0) self.assertGreater(len(self.v2_converted_spe_file.header['x_calibration']), 0) # def test_time(self): self.assertEqual(self.v2_spe_file.header['time'], "07/13/2013 19:42:23") 
self.assertEqual(self.v3_spe_file.header['time'], "09/06/2013 16:50:39.445678") self.assertEqual(self.v2_converted_spe_file.header['time'], "05/10/2013 10:34:27") def test_exposure_time(self): self.assertEqual(self.v2_spe_file.header['exposure_time'], 0.5) self.assertEqual(self.v3_spe_file.header['exposure_time'], 0.1) self.assertEqual(self.v2_converted_spe_file.header['exposure_time'], 0.18) def test_detector(self): self.assertEqual(self.v2_spe_file.header['detector'], 'unspecified') self.assertEqual(self.v3_spe_file.header['detector'], "PIXIS: 100BR") self.assertEqual(self.v2_converted_spe_file.header['detector'], 'unspecified') def test_grating(self): self.assertEqual(self.v2_spe_file.header['grating'], '300.0') self.assertEqual(self.v3_spe_file.header['grating'], '860nm 300') self.assertEqual(self.v2_converted_spe_file.header['grating'], '300.0') def test_center_wavelength(self): self.assertEqual(self.v2_spe_file.header['center_wavelength'], 750) self.assertEqual(self.v3_spe_file.header['center_wavelength'], 500) self.assertEqual(self.v2_converted_spe_file.header['center_wavelength'], 750) def test_roi(self): self.assertEqual(self.v3_spe_file.header['roi'], (0, 1024, 0, 100)) self.v3_custom_region = SpeImage() self.v3_custom_region.read(self.v3_custom_roi_filename) self.assertEqual(self.v3_custom_region.header['roi'], (100, 600, 10, 60)) self.assertEqual(len(self.v3_custom_region.header['x_calibration']), self.v3_custom_region.header['x_dim']) def test_read_data(self): self.assertEqual(self.v2_spe_file.data.shape, (100, 1340)) self.assertEqual(self.v3_spe_file.data.shape, (100, 1024)) self.assertEqual(self.v2_converted_spe_file.data.shape, (100, 1340)) def test_multiple_frames(self): self.v3_2frames_file = SpeImage() self.v3_2frames_file.read(self.v3_2frames_filename) self.assertEqual(self.v3_2frames_file.data.shape, (255, 1024)) frame1 = self.v3_2frames_file.data self.v3_2frames_file.read(self.v3_2frames_filename, 1) frame2 = self.v3_2frames_file.data 
self.assertFalse(numpy.array_equal(frame1, frame2)) self.assertEqual(frame1.shape, frame2.shape) def test_fabio_integration(self): v2_file = fabio.open(self.v2_spe_filename) v3_file = fabio.open(self.v3_spe_filename) v2_file_gz = fabio.open(self.v2_spe_filename + ".gz") v3_file_gz = fabio.open(self.v3_spe_filename + ".gz") v2_file_bz = fabio.open(self.v2_spe_filename + ".bz2") v3_file_bz = fabio.open(self.v3_spe_filename + ".bz2") self.assertEqual(abs(v2_file.data - v2_file_gz.data).max(), 0, "v2/gz") self.assertEqual(abs(v3_file.data - v3_file_gz.data).max(), 0, "v3/gz") self.assertEqual(abs(v2_file.data - v2_file_bz.data).max(), 0, "v2/bz") self.assertEqual(abs(v3_file.data - v3_file_bz.data).max(), 0, "v3/bz") def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestSpeImage)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/testmccdimage.py0000644001611600070440000001066513227357030021144 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# """ # Unit tests # builds on stuff from ImageD11.test.testpeaksearch 28/11/2014 """ from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os import numpy if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from ..tifimage import tifimage from ..marccdimage import marccdimage # statistics come from fit2d I think # filename dim1 dim2 min max mean stddev TESTIMAGES = """corkcont2_H_0089.mccd 2048 2048 0 354 7.2611 14.639 corkcont2_H_0089.mccd.bz2 2048 2048 0 354 7.2611 14.639 corkcont2_H_0089.mccd.gz 2048 2048 0 354 7.2611 14.639 somedata_0001.mccd 1024 1024 0 20721 128.37 136.23 somedata_0001.mccd.bz2 1024 1024 0 20721 128.37 136.23 somedata_0001.mccd.gz 1024 1024 0 20721 128.37 136.23""" class TestNormalTiffOK(unittest.TestCase): """ check we can read normal tifs as well as mccd """ def setUp(self): """ create an image """ self.image = os.path.join(UtilsTest.tempdir, "tifimagewrite_test0000.tif") self.imdata = numpy.zeros((24, 24), numpy.uint16) self.imdata[12:14, 15:17] = 42 obj = tifimage(self.imdata, {}) obj.write(self.image) def test_read_openimage(self): from fabio.openimage import openimage obj = openimage(self.image) if obj.data.astype(int).tostring() != self.imdata.astype(int).tostring(): logger.info("%s %s" % (type(self.imdata), self.imdata.dtype)) logger.info("%s %s" % (type(obj.data), obj.data.dtype)) logger.info("%s %s" % (obj.data - self.imdata)) self.assertEqual(obj.data.astype(int).tostring(), self.imdata.astype(int).tostring()) def tearDown(self): unittest.TestCase.tearDown(self) os.unlink(self.image) class TestFlatMccds(unittest.TestCase): def setUp(self): self.fn = {} for i in ["corkcont2_H_0089.mccd", "somedata_0001.mccd"]: self.fn[i] = UtilsTest.getimage(i + ".bz2")[:-4] self.fn[i + ".bz2"] = self.fn[i] + ".bz2" self.fn[i 
+ ".gz"] = self.fn[i] + ".gz" for i in self.fn: assert os.path.exists(self.fn[i]) def test_read(self): """ check we can read MarCCD images""" for line in TESTIMAGES.split("\n"): vals = line.split() name = vals[0] dim1, dim2 = [int(x) for x in vals[1:3]] mini, maxi, mean, stddev = [float(x) for x in vals[3:]] obj = marccdimage() obj.read(self.fn[name]) self.assertAlmostEqual(mini, obj.getmin(), 2, "getmin") self.assertAlmostEqual(maxi, obj.getmax(), 2, "getmax") self.assertAlmostEqual(mean, obj.getmean(), 2, "getmean") self.assertAlmostEqual(stddev, obj.getstddev(), 2, "getstddev") self.assertEqual(dim1, obj.dim1, "dim1") self.assertEqual(dim2, obj.dim2, "dim2") def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestFlatMccds)) testsuite.addTest(loadTests(TestNormalTiffOK)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/__init__.py0000755001611600070440000000317213227357030020070 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# """ Test module FabIO """ __authors__ = ["Jérôme Kieffer"] __contact__ = "jerome.kieffer@esrf.eu" __license__ = "GPLv3+" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" __data__ = "30/10/2015" import sys import unittest from . import utilstest from . import test_all def suite(): test_suite = unittest.TestSuite() test_suite.addTest(test_all.suite()) return test_suite def run_tests(): """Run test complete test_suite""" mysuite = test_all.suite() runner = unittest.TextTestRunner() if not runner.run(mysuite).wasSuccessful(): print("Test suite failed") return 1 else: print("Test suite succeeded") return 0 fabio-0.6.0/fabio/test/utilstest.py0000644001611600070440000002534113227357030020370 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # coding: utf-8 # # Project: FabIO tests class utilities # # Copyright (C) 2010-2016 European Synchrotron Radiation Facility # Grenoble, France # # Principal authors: Jérôme KIEFFER (jerome.kieffer@esrf.fr) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # from __future__ import print_function, division, absolute_import, with_statement __author__ = "Jérôme Kieffer" __contact__ = "jerome.kieffer@esrf.eu" __license__ = "MIT" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" __date__ = "11/08/2017" PACKAGE = "fabio" DATA_KEY = "FABIO_DATA" import os import sys import getpass import threading import logging import bz2 from ..third_party import gzip import json import tempfile try: # Python3 from urllib.request import urlopen, ProxyHandler, build_opener except ImportError: # Python2 from urllib2 import urlopen, ProxyHandler, build_opener logging.basicConfig(level=logging.WARNING) logger = logging.getLogger("%s.utilstest" % PACKAGE) TEST_HOME = os.path.dirname(os.path.abspath(__file__)) class UtilsTest(object): """ Static class providing useful stuff for preparing tests. 
""" options = None timeout = 60 # timeout in seconds for downloading images # url_base = "http://downloads.sourceforge.net/fable" url_base = "http://www.edna-site.org/pub/fabio/testimages" sem = threading.Semaphore() recompiled = False reloaded = False name = PACKAGE script_dir = None try: fabio = __import__("%s.directories" % name) image_home = fabio.directories.testimages except Exception as err: logger.warning("in loading directories %s", err) image_home = None else: image_home = fabio.directories.testimages if image_home is None: image_home = os.path.join(tempfile.gettempdir(), "%s_testimages_%s" % (name, getpass.getuser())) if not os.path.exists(image_home): os.makedirs(image_home) testimages = os.path.join(image_home, "all_testimages.json") if os.path.exists(testimages): with open(testimages) as f: ALL_DOWNLOADED_FILES = set(json.load(f)) else: ALL_DOWNLOADED_FILES = set() tempdir = tempfile.mkdtemp("_" + getpass.getuser(), name + "_") @classmethod def deep_reload(cls): cls.fabio = __import__(cls.name) return cls.fabio @classmethod def forceBuild(cls, remove_first=True): """ Force the recompilation of FabIO Nonesense, kept for legacy reasons """ return @classmethod def timeoutDuringDownload(cls, imagename=None): """ Function called after a timeout in the download part ... just raise an Exception. """ if imagename is None: imagename = "2252/testimages.tar.bz2 unzip it " raise RuntimeError("Could not automatically \ download test images!\n \ If you are behind a firewall, \ please set both environment variable http_proxy and https_proxy.\ This even works under windows ! \n \ Otherwise please try to download the images manually from \n %s/%s and put it in in test/testimages." % (cls.url_base, imagename)) @classmethod def getimage(cls, imagename): """ Downloads the requested image from Forge.EPN-campus.eu :param str imagename: name of the image. 
For the RedMine forge, the filename contains a directory name that is removed :return: full path of the locally saved file """ if imagename not in cls.ALL_DOWNLOADED_FILES: cls.ALL_DOWNLOADED_FILES.add(imagename) image_list = list(cls.ALL_DOWNLOADED_FILES) image_list.sort() try: with open(cls.testimages, "w") as fp: json.dump(image_list, fp, indent=4) except IOError: logger.debug("Unable to save JSON list") baseimage = os.path.basename(imagename) logger.info("UtilsTest.getimage('%s')" % baseimage) if not os.path.exists(cls.image_home): os.makedirs(cls.image_home) fullimagename = os.path.abspath(os.path.join(cls.image_home, baseimage)) if os.path.exists(fullimagename): return fullimagename if baseimage.endswith(".bz2"): bzip2name = baseimage basename = baseimage[:-4] gzipname = basename + ".gz" elif baseimage.endswith(".gz"): gzipname = baseimage basename = baseimage[:-3] bzip2name = basename + ".bz2" else: basename = baseimage gzipname = baseimage + "gz2" bzip2name = basename + ".bz2" fullimagename_gz = os.path.abspath(os.path.join(cls.image_home, gzipname)) fullimagename_raw = os.path.abspath(os.path.join(cls.image_home, basename)) fullimagename_bz2 = os.path.abspath(os.path.join(cls.image_home, bzip2name)) data = None if not os.path.isfile(fullimagename_bz2): logger.info("Trying to download image %s, timeout set to %ss", bzip2name, cls.timeout) dictProxies = {} if "http_proxy" in os.environ: dictProxies['http'] = os.environ["http_proxy"] dictProxies['https'] = os.environ["http_proxy"] if "https_proxy" in os.environ: dictProxies['https'] = os.environ["https_proxy"] if dictProxies: proxy_handler = ProxyHandler(dictProxies) opener = build_opener(proxy_handler).open else: opener = urlopen logger.info("wget %s/%s" % (cls.url_base, imagename)) data = opener("%s/%s" % (cls.url_base, imagename), data=None, timeout=cls.timeout).read() logger.info("Image %s successfully downloaded." 
% baseimage) try: with open(fullimagename_bz2, "wb") as outfile: outfile.write(data) except IOError: raise IOError("unable to write downloaded \ data to disk at %s" % cls.image_home) if not os.path.isfile(fullimagename_bz2): raise RuntimeError("Could not automatically \ download test images %s!\n \ If you are behind a firewall, \ please set the environment variable http_proxy.\n \ Otherwise please try to download the images manually from \n \ %s" % (cls.url_base, imagename)) if not os.path.isfile(fullimagename_raw) or\ not os.path.isfile(fullimagename_gz): if data is None: data = open(fullimagename_bz2, "rb").read() decompressed = bz2.decompress(data) if not os.path.exists(fullimagename_raw): try: open(fullimagename_raw, "wb").write(decompressed) except IOError: raise IOError("unable to write decompressed \ data to disk at %s" % cls.image_home) if not os.path.exists(fullimagename_gz): try: gzip.open(fullimagename_gz, "wb").write(decompressed) except IOError: raise IOError("unable to write gzipped \ data to disk at %s" % cls.image_home) return fullimagename @classmethod def download_images(cls, imgs=None): """ Download all images needed for the test/benchmarks :param imgs: list of files to download """ if not imgs: imgs = cls.ALL_DOWNLOADED_FILES for fn in imgs: print("Downloading from internet: %s" % fn) if fn[-4:] != ".bz2": if fn[-3:] == ".gz": fn = fn[:-2] + "bz2" else: fn = fn + ".bz2" print(" actually " + fn) cls.getimage(fn) @classmethod def get_logger(cls, filename=__file__): """ small helper function that initialized the logger and returns it """ basename = os.path.basename(os.path.abspath(filename)) basename = os.path.splitext(basename)[0] level = logging.root.level mylogger = logging.getLogger(basename) logger.setLevel(level) mylogger.setLevel(level) mylogger.debug("tests loaded from file: %s" % basename) return mylogger @classmethod def script_path(cls, script): """ Returns the path of the executable and the associated environment In Windows, it checks 
availability of script using .py .bat, and .exe file extensions. """ if (sys.platform == "win32"): available_extensions = [".py", ".bat", ".exe"] else: available_extensions = [""] env = dict((str(k), str(v)) for k, v in os.environ.items()) env["PYTHONPATH"] = os.pathsep.join(sys.path) paths = os.environ.get("PATH", "").split(os.pathsep) if cls.script_dir is not None: paths.insert(0, cls.script_dir) for base in paths: # clean up extra quotes from paths if base.startswith('"') and base.endswith('"'): base = base[1:-1] for file_extension in available_extensions: script_path = os.path.join(base, script + file_extension) if os.path.exists(script_path): # script found return script_path, env # script not found logger.warning("Script '%s' not found in paths: %s", script, ":".join(paths)) script_path = script return script_path, env fabio-0.6.0/fabio/test/testdm3image.py0000644001611600070440000000636013227357030020716 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# """ # Unit tests # builds on stuff from ImageD11.test.testpeaksearch Updated by Jerome Kieffer (jerome.kieffer@esrf.eu), 2011 """ from __future__ import print_function, with_statement, division, absolute_import __date__ = "29/09/2017" __author__ = "jerome Kieffer" import unittest import sys import os if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from fabio.dm3image import Dm3Image # statistics come from fit2d I think # filename dim1 dim2 min max mean stddev TESTIMAGES = """ref_d20x_310mm.dm3 2048 2048 -31842.354 23461.672 569.38782 1348.4183 ref_d20x_310mm.dm3.gz 2048 2048 -31842.354 23461.672 569.38782 1348.4183 ref_d20x_310mm.dm3.bz2 2048 2048 -31842.354 23461.672 569.38782 1348.4183""" class TestDm3Image(unittest.TestCase): """ """ def setUp(self): """ Download images """ self.im_dir = os.path.dirname(UtilsTest.getimage("ref_d20x_310mm.dm3.bz2")) def test_read(self): """ check we can read dm3 images""" for line in TESTIMAGES.split("\n"): vals = line.split() name = vals[0] dim1, dim2 = [int(x) for x in vals[1:3]] mini, maxi, mean, stddev = [float(x) for x in vals[3:]] fname = os.path.join(self.im_dir, name) obj1 = Dm3Image() obj1.read(fname) obj2 = fabio.open(fname) for obj in (obj1, obj2): self.assertAlmostEqual(mini, obj.getmin(), 2, "getmin") self.assertAlmostEqual(maxi, obj.getmax(), 2, "getmax") got_mean = obj.getmean() self.assertAlmostEqual(mean, got_mean, 2, "getmean exp %s != got %s" % (mean, got_mean)) self.assertAlmostEqual(stddev, obj.getstddev(), 2, "getstddev") self.assertEqual(dim1, obj.dim1, "dim1") self.assertEqual(dim2, obj.dim2, "dim2") def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestDm3Image)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() 
runner.run(suite()) fabio-0.6.0/fabio/test/testhdf5image.py0000644001611600070440000000632613227357030021063 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # """Test Eiger images """ from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from fabio.fabioutils import exists from fabio.openimage import openimage from fabio.hdf5image import Hdf5Image, h5py def make_hdf5(name, shape=(50, 99, 101)): if h5py is None: raise unittest.SkipTest("h5py is not available") with h5py.File(name) as h: e = h.require_group("entry") if len(shape) == 2: e.require_dataset("data", shape, compression="gzip", compression_opts=9, dtype="float32") elif len(shape) == 3: e.require_dataset("data", shape, chunks=(1,) + shape[1:], compression="gzip", compression_opts=9, dtype="float32") return name + "::entry/data" class TestHdf5(unittest.TestCase): """basic test""" @classmethod def setUpClass(cls): super(TestHdf5, cls).setUpClass() cls.fn2 = os.path.join(UtilsTest.tempdir, "eiger2d.h5") cls.fn2 = make_hdf5(cls.fn2, (99, 101)) cls.fn3 = os.path.join(UtilsTest.tempdir, "eiger3d.h5") cls.fn3 = make_hdf5(cls.fn3, (50, 99, 101)) @classmethod def tearDownClass(cls): super(TestHdf5, cls).tearDownClass() if exists(cls.fn3): os.unlink(cls.fn3.split("::")[0]) if exists(cls.fn2): os.unlink(cls.fn2.split("::")[0]) def test_read(self): """ check we can read images from Eiger""" e = Hdf5Image() e.read(self.fn2) self.assertEqual(e.dim1, 101, "dim1 OK") self.assertEqual(e.dim2, 99, "dim2 OK") 
self.assertEqual(e.nframes, 1, "nframes OK") self.assertEqual(e.bpp, 4, "nframes OK") e = Hdf5Image() e.read(self.fn3) self.assertEqual(e.dim1, 101, "dim1 OK") self.assertEqual(e.dim2, 99, "dim2 OK") self.assertEqual(e.nframes, 50, "nframes OK") self.assertEqual(e.bpp, 4, "nframes OK") def test_open(self): """ check we can read images from Eiger""" e = openimage(self.fn2) self.assertEqual(e.dim1, 101, "dim1 OK") self.assertEqual(e.dim2, 99, "dim2 OK") self.assertEqual(e.nframes, 1, "nframes OK") self.assertEqual(e.bpp, 4, "nframes OK") e = openimage(self.fn3) self.assertEqual(e.dim1, 101, "dim1 OK") self.assertEqual(e.dim2, 99, "dim2 OK") self.assertEqual(e.nframes, 50, "nframes OK") self.assertEqual(e.bpp, 4, "nframes OK") def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestHdf5)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/testfabioconvert.py0000644001611600070440000001622213227357030021707 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Valentin Valls (valentin.valls@esrf.fr) # """ Test for fabio-convert """ import fabio import numpy import os.path import tempfile import shutil import sys import subprocess import time import unittest import os import fabio.app.convert class TestFabioConvert(unittest.TestCase): def create_test_env(self): path = tempfile.mkdtemp() os.mkdir(os.path.join(path, "input")) os.mkdir(os.path.join(path, "output")) data = numpy.random.rand(100, 100) image = fabio.edfimage.edfimage(data=data) image.write(os.path.join(path, "input", "01.edf")) data = numpy.random.rand(100, 100) image = fabio.edfimage.edfimage(data=data) image.write(os.path.join(path, "input", "02.edf")) data = 
numpy.random.rand(100, 100) image = fabio.edfimage.edfimage(data=data) image.write(os.path.join(path, "input", "03.edf")) data = numpy.random.rand(100, 100) image = fabio.edfimage.edfimage(data=data) # it is not the right file format, but it makes no difference image.write(os.path.join(path, "output", "01.msk")) data = numpy.random.rand(100, 100) image = fabio.edfimage.edfimage(data=data) # it is not the right file format, but it makes no difference image.write(os.path.join(path, "output", "02.msk")) t = time.time() older = (t - 5000, t - 5000) default = (t - 4000, t - 4000) newer = (t - 3000, t - 3000) os.utime(os.path.join(path, "input", "01.edf"), default) os.utime(os.path.join(path, "input", "02.edf"), default) os.utime(os.path.join(path, "input", "03.edf"), default) os.utime(os.path.join(path, "output", "01.msk"), older) os.utime(os.path.join(path, "output", "02.msk"), newer) return path def clean_test_env(self, path): shutil.rmtree(path) def setUp(self): self.__oldPath = os.getcwd() self.__testPath = self.create_test_env() os.chdir(self.__testPath) env = dict((str(k), str(v)) for k, v in os.environ.items()) env["PYTHONPATH"] = os.pathsep.join(sys.path) self.__env = env self.__script = fabio.app.convert.__file__ def tearDown(self): os.chdir(self.__oldPath) self.clean_test_env(self.__testPath) self.exe, self.env = None, None def subprocessFabioConvert(self, *args): commandLine = [sys.executable, self.__script] commandLine.extend(args) return subprocess.Popen(commandLine, env=self.__env, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE) def testSingleFile(self): p = self.subprocessFabioConvert("input/03.edf", "-o=output/03.msk") p.communicate() assert(os.path.exists("output/03.msk")) image = fabio.open("output/03.msk") assert(isinstance(image, fabio.fit2dmaskimage.Fit2dMaskImage)) def testSingleFileToDir(self): p = self.subprocessFabioConvert("input/03.edf", "-F=msk", "-o=output") p.communicate() assert(os.path.exists("output/03.msk")) def 
testSingleFileWithWildcardToDir(self): p = self.subprocessFabioConvert("input/03.*", "-F=msk", "-o=output") p.communicate() assert(os.path.exists("output/03.msk")) def testFullFormatName(self): p = self.subprocessFabioConvert("input/03.*", "-F=numpyimage", "-o=output") p.communicate() assert(os.path.exists("output/03.npy")) image = fabio.open("output/03.npy") assert(isinstance(image, fabio.numpyimage.NumpyImage)) def testForceOption(self): date1 = os.path.getmtime("output/01.msk") date2 = os.path.getmtime("output/02.msk") p = self.subprocessFabioConvert("input/*.edf", "-f", "-F=msk", "-o=output") p.communicate() assert(os.path.exists("output/01.msk")) assert(date1 < os.path.getmtime("output/01.msk")) assert(os.path.exists("output/02.msk")) assert(date2 < os.path.getmtime("output/02.msk")) assert(os.path.exists("output/03.msk")) def testRemoveDestinationOption(self): date1 = os.path.getmtime("output/01.msk") date2 = os.path.getmtime("output/02.msk") p = self.subprocessFabioConvert("input/*.edf", "--remove-destination", "-F=msk", "-o=output") p.communicate() assert(os.path.exists("output/01.msk")) assert(date1 < os.path.getmtime("output/01.msk")) assert(os.path.exists("output/02.msk")) assert(date2 < os.path.getmtime("output/02.msk")) assert(os.path.exists("output/03.msk")) def testNoClobberOption(self): date1 = os.path.getmtime("output/01.msk") date2 = os.path.getmtime("output/02.msk") p = self.subprocessFabioConvert("input/*.edf", "-n", "-F=msk", "-o=output") p.communicate() assert(os.path.exists("output/01.msk")) assert(date1 == os.path.getmtime("output/01.msk")) assert(os.path.exists("output/02.msk")) assert(date2 == os.path.getmtime("output/02.msk")) assert(os.path.exists("output/03.msk")) def testUpdateOption(self): date1 = os.path.getmtime("output/01.msk") date2 = os.path.getmtime("output/02.msk") p = self.subprocessFabioConvert("input/*.edf", "--update", "-F=msk", "-o=output") p.communicate() assert(os.path.exists("output/01.msk")) assert(date1 < 
os.path.getmtime("output/01.msk")) assert(os.path.exists("output/02.msk")) assert(date2 == os.path.getmtime("output/02.msk")) assert(os.path.exists("output/03.msk")) def testDefaultOption(self): date1 = os.path.getmtime("output/01.msk") date2 = os.path.getmtime("output/02.msk") p = self.subprocessFabioConvert("input/*.edf", "-F=msk", "-o=output") p.stdin.write(b'yes\n') p.stdin.write(b'no\n') p.communicate() assert(os.path.exists("output/01.msk")) assert(date1 < os.path.getmtime("output/01.msk")) assert(os.path.exists("output/02.msk")) assert(date2 == os.path.getmtime("output/02.msk")) assert(os.path.exists("output/03.msk")) def testInteractiveOption(self): date1 = os.path.getmtime("output/01.msk") date2 = os.path.getmtime("output/02.msk") p = self.subprocessFabioConvert("input/*.edf", "-n", "-i", "-F=msk", "-o=output") p.stdin.write(b'yes\n') p.stdin.write(b'no\n') p.communicate() assert(os.path.exists("output/01.msk")) assert(date1 < os.path.getmtime("output/01.msk")) assert(os.path.exists("output/02.msk")) assert(date2 == os.path.getmtime("output/02.msk")) assert(os.path.exists("output/03.msk")) def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestFabioConvert)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/test_nexus.py0000644001611600070440000000526213227357030020531 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """Unit tests for nexus file reader """ from __future__ import print_function, with_statement, division, absolute_import import unittest import os if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) from .. import nexus class TestNexus(unittest.TestCase): def setUp(self): if nexus.h5py is None: self.skipTest("h5py library is not available. Skipping Nexus test") def test_nexus(self): "Test creation of Nexus files" fname = os.path.join(UtilsTest.tempdir, "nexus.h5") nex = nexus.Nexus(fname) entry = nex.new_entry("entry") nex.new_instrument(entry, "ID00") nex.new_detector("camera") self.assertEqual(len(nex.get_entries()), 2, "nexus file has 2 entries") nex.close() self.assertTrue(os.path.exists(fname)) os.unlink(fname) def test_from_time(self): fname = os.path.join(UtilsTest.tempdir, "nexus.h5") nex = nexus.Nexus(fname) entry = nex.new_entry("entry") time1 = nexus.from_isotime(entry["start_time"].value) entry["bad_time"] = [entry["start_time"].value] # this is a list time2 = nexus.from_isotime(entry["bad_time"].value) self.assertEqual(time1, time2, "start_time in list does not works !") nex.close() self.assertTrue(os.path.exists(fname)) os.unlink(fname) def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestNexus)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) 
fabio-0.6.0/fabio/test/testheadernotsingleton.py0000644001611600070440000000476013227357030023126 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """ # Unit tests # builds on stuff from ImageD11.test.testpeaksearch 28/11/2014 """ from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] import shutil class TestHeaderNotSingleton(unittest.TestCase): def setUp(self): """ download images """ self.file1 = UtilsTest.getimage("mb_LP_1_001.img.bz2")[:-4] def testheader(self): file2 = self.file1.replace("mb_LP_1_001.img", "mb_LP_1_002.img") self.assertTrue(os.path.exists(self.file1)) if not os.path.exists(file2): shutil.copy(self.file1, file2) image1 = fabio.open(self.file1) image2 = fabio.open(file2) abs_norm = lambda fn: os.path.normcase(os.path.abspath(fn)) self.assertEqual(abs_norm(image1.filename), abs_norm(self.file1)) self.assertEqual(abs_norm(image2.filename), 
abs_norm(file2)) self.assertNotEqual(image1.filename, image2.filename) def tearDown(self): unittest.TestCase.tearDown(self) self.file1 = None def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestHeaderNotSingleton)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/testnumpyimage.py0000755001611600070440000000743113227357030021406 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """ Test for numpy images. 
""" __author__ = "Jérôme Kieffer" __date__ = "27/07/2017" import os import sys import unittest if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest import numpy logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from fabio.numpyimage import NumpyImage from fabio.openimage import openimage class TestNumpy(unittest.TestCase): """basic test""" def setUp(self): """Generate files""" self.ary = numpy.random.randint(0, 6500, size=99).reshape(11, 9).astype("uint16") self.fn = os.path.join(UtilsTest.tempdir, "numpy.npy") self.fn2 = os.path.join(UtilsTest.tempdir, "numpy2.npy") numpy.save(self.fn, self.ary) def tearDown(self): unittest.TestCase.tearDown(self) for i in (self.fn, self.fn2): if os.path.exists(i): os.unlink(i) self.ary = self.fn = self.fn2 = None def test_read(self): """ check we can read pnm images""" obj = openimage(self.fn) self.assertEqual(obj.bytecode, numpy.uint16, msg="bytecode is OK") self.assertEqual(9, obj.dim1, "dim1") self.assertEqual(11, obj.dim2, "dim2") self.assertTrue(numpy.allclose(obj.data, self.ary), "data") def test_write(self): """ check we can write numpy images""" ref = NumpyImage(data=self.ary) ref.save(self.fn2) with openimage(self.fn2) as obj: self.assertEqual(obj.bytecode, numpy.uint16, msg="bytecode is OK") self.assertEqual(9, obj.dim1, "dim1") self.assertEqual(11, obj.dim2, "dim2") self.assertTrue(numpy.allclose(obj.data, self.ary), "data") def test_multidim(self): for shape in (10,), (10, 15), (10, 15, 20), (10, 15, 20, 25): ary = numpy.random.random(shape).astype("float32") numpy.save(self.fn, ary) with openimage(self.fn) as obj: self.assertEqual(obj.bytecode, numpy.float32, msg="bytecode is OK") self.assertEqual(shape[-1], obj.dim1, "dim1") dim2 = 1 if len(shape) == 1 else shape[-2] self.assertEqual(dim2, obj.dim2, "dim2") nframes = 1 if len(shape) > 2: for i in shape[:-2]: nframes *= i # print(shape,nframes, 
obj.nframes) self.assertEqual(nframes, obj.nframes, "nframes") if os.path.exists(self.fn): os.unlink(self.fn) def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestNumpy)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/testtifimage.py0000755001611600070440000001460113227357030021015 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# """Tiff Unit tests""" from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] class TestTif(unittest.TestCase): # filename dim1 dim2 min max mean stddev TESTIMAGES = """ Feb09-bright-00.300s_WAXS.bz2 1042 1042 0 65535 8546.6414 1500.4198 Feb09-bright-00.300s_WAXS.gz 1042 1042 0 65535 8546.6414 1500.4198 Feb09-bright-00.300s_WAXS 1042 1042 0 65535 8546.6414 1500.4198 """ def test_read(self): """ Test the reading of Mar345 images """ for line in self.TESTIMAGES.split('\n'): vals = line.strip().split() if not vals: continue name = vals[0] logger.debug("Processing: %s" % name) dim1, dim2 = [int(x) for x in vals[1:3]] mini, maxi, mean, stddev = [float(x) for x in vals[3:]] obj = fabio.tifimage.TifImage() obj.read(UtilsTest.getimage(name)) self.assertAlmostEqual(mini, obj.getmin(), 2, "getmin [%s,%s]" % (mini, obj.getmin())) self.assertAlmostEqual(maxi, obj.getmax(), 2, "getmax [%s,%s]" % (maxi, obj.getmax())) self.assertAlmostEqual(mean, obj.getmean(), 2, "getmean [%s,%s]" % (mean, obj.getmean())) self.assertAlmostEqual(stddev, obj.getstddev(), 2, "getstddev [%s,%s]" % (stddev, obj.getstddev())) self.assertEqual(dim1, obj.dim1, "dim1") self.assertEqual(dim2, obj.dim2, "dim2") class TestTifImage_Pilatus(unittest.TestCase): def setUp(self): self.fn = {} for i in ["pilatus2M.tif", "pilatus2M.edf"]: self.fn[i] = UtilsTest.getimage(i + ".bz2") for i in self.fn: assert os.path.exists(self.fn[i]) def test1(self): """ Testing pilatus tif bug """ o1 = fabio.open(self.fn["pilatus2M.tif"]).data o2 = fabio.open(self.fn["pilatus2M.edf"]).data self.assertEqual(abs(o1 - o2).max(), 0.0) class TestTifImage_Packbits(unittest.TestCase): def setUp(self): self.fn = {} for i in ["oPPA_5grains_0001.tif", 
"oPPA_5grains_0001.edf"]: self.fn[i] = UtilsTest.getimage(i + ".bz2") for i in self.fn: assert os.path.exists(self.fn[i]) def test1(self): """ Testing packbit comressed data tif bug """ o1 = fabio.open(self.fn["oPPA_5grains_0001.tif"]).data o2 = fabio.open(self.fn["oPPA_5grains_0001.edf"]).data self.assertEqual(abs(o1 - o2).max(), 0.0) class TestTifImage_fit2d(unittest.TestCase): def setUp(self): self.fn = {} for i in ["fit2d.tif", "fit2d.edf"]: self.fn[i] = UtilsTest.getimage(i + ".bz2") for i in self.fn: assert os.path.exists(self.fn[i]) def test1(self): """ Testing packbit comressed data tif bug """ o1 = fabio.open(self.fn["fit2d.tif"]).data o2 = fabio.open(self.fn["fit2d.edf"]).data self.assertEqual(abs(o1 - o2).max(), 0.0) class TestTifImage_A0009(unittest.TestCase): """ test image from ??? with this error a0009.tif TIFF 1024x1024 1024x1024+0+0 16-bit Grayscale DirectClass 2MiB 0.000u 0:00.010 identify: a0009.tif: invalid TIFF directory; tags are not sorted in ascending order. `TIFFReadDirectory' @ tiff.c/TIFFWarnings/703. identify: a0009.tif: TIFF directory is missing required "StripByteCounts" field, calculating from imagelength. `TIFFReadDirectory' @ tiff.c/TIFFWarnings/703. 
""" def setUp(self): self.fn = {} for i in ["a0009.tif", "a0009.edf"]: self.fn[i] = UtilsTest.getimage(i + ".bz2")[:-4] for i in self.fn: assert os.path.exists(self.fn[i]) def test1(self): """ Testing packbit comressed data tif bug """ o1 = fabio.open(self.fn["a0009.tif"]).data o2 = fabio.open(self.fn["a0009.edf"]).data self.assertEqual(abs(o1 - o2).max(), 0.0) class TestGzipTif(unittest.TestCase): def setUp(self): self.unzipped = UtilsTest.getimage("oPPA_5grains_0001.tif.bz2")[:-4] self.zipped = self.unzipped + ".gz" assert os.path.exists(self.zipped) assert os.path.exists(self.unzipped) def test1(self): o1 = fabio.open(self.zipped) o2 = fabio.open(self.unzipped) self.assertEqual(o1.data[0, 0], 10) self.assertEqual(o2.data[0, 0], 10) class TestTif_Rect(unittest.TestCase): def setUp(self): self.fn = UtilsTest.getimage("testmap1_0002.tif.bz2")[:-4] def test1(self): for ext in ["", ".gz", ".bz2"]: o1 = fabio.open(self.fn + ext) self.assertEqual(o1.data.shape, (100, 120)) def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestTif)) testsuite.addTest(loadTests(TestGzipTif)) testsuite.addTest(loadTests(TestTif_Rect)) testsuite.addTest(loadTests(TestTifImage_A0009)) testsuite.addTest(loadTests(TestTifImage_fit2d)) testsuite.addTest(loadTests(TestTifImage_Packbits)) testsuite.addTest(loadTests(TestTifImage_Pilatus)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/testfit2dimage.py0000644001611600070440000000703213227357030021240 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: FabIO X-ray image reader # # Copyright (C) 2010-2016 European Synchrotron Radiation Facility # Grenoble, France # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without 
class TestFit2DImage(unittest.TestCase):
    """Tests for the fit2d .f2d binary image reader."""

    def setUp(self):
        """Download the reference images."""
        self.filename = UtilsTest.getimage("fit2d.f2d.bz2")[:-4]
        self.tiffilename = UtilsTest.getimage("fit2d.tif.bz2")[:-4]

    def test_read(self):
        """Check the f2d file parses with the expected geometry."""
        img = fit2dimage()
        img.read(self.filename)
        self.assertEqual(img.dim1, 25)
        self.assertEqual(img.dim2, 28)
        self.assertEqual(img.bpp, 4)
        self.assertEqual(img.bytecode, numpy.float32)
        self.assertEqual(img.data.shape, (28, 25))

    def test_match(self):
        """The f2d data equals the tif data, up to a vertical flip."""
        f2d = fabio.open(self.filename)
        tif = fabio.open(self.tiffilename)
        # Explicit re-read kept from the original test (exercises read()
        # on an already-opened object).
        f2d.read(self.filename)
        self.assertEqual(f2d.data.shape, tif.data.shape)
        delta = tif.data - numpy.flipud(f2d.data)
        self.assertEqual(abs(delta).sum(dtype=float), 0)

    def test_mask(self):
        """Data and mask agree with the CBF and MSK references."""
        img = fabio.open(UtilsTest.getimage("Pilatus1M.f2d.bz2"))
        cbf = fabio.open(UtilsTest.getimage("Pilatus1M.cbf.bz2"))
        msk = fabio.open(UtilsTest.getimage("Pilatus1M.msk.bz2"))
        self.assertEqual(abs((img.data).astype("int32") - cbf.data).sum(), 0)
        self.assertEqual(
            abs((msk.data).astype("int32") - img.header["data_mask"].astype("int32")).sum(), 0)
def shellbench(cmd, imname):
    """Time a shell (de)compression command run on *imname*.

    The shell appears to lie about its performance: it claims zero time
    to gunzip a file when it actually takes ~200 ms, presumably thanks to
    a cache.  Touching the file first (except on Windows, where ``touch``
    is unavailable) tries to defeat that.

    :param cmd: shell command prefix, e.g. ``"gzip -cd"``
    :param imname: path of the file passed as the command's argument
    :return: wall-clock time in seconds spent running the command
    """
    if sys.platform != "win32":
        os.system("touch " + imname)
    astart = time.time()
    # BUG FIX: os.popen() only accepts text modes "r"/"w" on Python 3;
    # the original mode "rb" raises ValueError there.  Also close the
    # pipe deterministically instead of leaking it.
    with os.popen(cmd + " " + imname, "r") as pipe:
        pipe.read()
    return time.time() - astart
Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """ Test cases for filename deconstruction testsuite by Jerome Kieffer (Jerome.Kieffer@esrf.eu) 28/11/2014 """ from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] CASES = [ (1, 'edf', "data0001.edf"), (10001, 'edf', "data10001.edf"), (10001, 'edf', "data10001.edf.gz"), (10001, 'edf', "data10001.edf.bz2"), (2, 'marccd', "data0002.mccd"), (12345, 'marccd', "data12345.mccd"), (10001, 'marccd', "data10001.mccd.gz"), (10001, 'marccd', "data10001.mccd.bz2"), (123, 'marccd', "data123.mccd.gz"), (3, 'tif_or_pilatus', "data0003.tif"), (4, 'tif_or_pilatus', "data0004.tiff"), (12, 'bruker', "sucrose101.012.gz"), (99, 'bruker', "sucrose101.099"), (99, 'bruker', "sucrose101.0099"), (99, 'bruker', "sucrose101.0099.bz2"), (99, 'bruker', "sucrose101.0099.gz"), (2, 'fit2dmask', "fit2d.msk"), (None, 'fit2dmask', "mymask.msk"), (670005, 'edf', 'S82P670005.edf'), (670005, 'edf', 'S82P670005.edf.gz'), # based on only the 
class TestFilenames(unittest.TestCase):
    """Check the filename -> (number, type) conversions."""

    def test_many_cases(self):
        """Every entry of CASES deconstructs to the expected number/format."""
        for num, typ, name in CASES:
            parsed = fabio.FilenameObject(filename=name)
            self.assertEqual(num, parsed.num,
                             "%s num=%s != obj.num=%s" % (name, num, parsed.num))
            self.assertEqual(typ, "_or_".join(parsed.format),
                             "%s %s" % (name, "_or_".join(parsed.format)))
            self.assertEqual(name, parsed.tostring(),
                             "%s %s" % (name, parsed.tostring()))

    def test_more_cases(self):
        """construct_filename rebuilds the expected sibling filename."""
        for expected, template, num in MORE_CASES:
            self.assertEqual(fabio.construct_filename(template, num), expected)

    def test_more_cases_jump(self):
        """jump_filename matches construct_filename on the same table."""
        for expected, template, num in MORE_CASES:
            self.assertEqual(fabio.jump_filename(template, num), expected)
fabio-0.6.0/fabio/test/testpnmimage.py0000755001611600070440000000621113227357030021023 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """ Test for PNM images. 
class TestPNM(unittest.TestCase):
    """Basic read/write tests for PNM images."""

    # filename dim1 dim2 min max mean stddev
    results = """image0001.pgm 1024 1024 0 28416 353.795654296875 2218.0290682517543"""

    def setUp(self):
        """Download the reference files listed in *results*."""
        self.fn = {}
        for line in self.results.split("\n"):
            name = line.split()[0]
            self.fn[name] = UtilsTest.getimage(name + ".bz2")[:-4]
        for name in self.fn:
            assert os.path.exists(self.fn[name])

    def test_read(self):
        """Check we can read pnm images and their statistics match."""
        tokens = self.results.split()
        name = tokens[0]
        dim1, dim2 = int(tokens[1]), int(tokens[2])
        mini, maxi, mean, stddev = [float(x) for x in tokens[3:]]
        obj = openimage(self.fn[name])
        self.assertAlmostEqual(mini, obj.getmin(), 4, "getmin")
        self.assertAlmostEqual(maxi, obj.getmax(), 4, "getmax")
        self.assertAlmostEqual(mean, obj.getmean(), 4, "getmean")
        self.assertAlmostEqual(stddev, obj.getstddev(), 4, "getstddev")
        self.assertEqual(dim1, obj.dim1, "dim1")
        self.assertEqual(dim2, obj.dim2, "dim2")

    def test_write(self):
        """Round-trip a random uint array through a PNM file."""
        pnmfile = os.path.join(UtilsTest.tempdir, "pnmfile.pnm")
        shape = (9, 11)
        size = shape[0] * shape[1]
        data = numpy.random.randint(0, 65000, size=size).reshape(shape)
        pnmimage(data=data).save(pnmfile)
        with openimage(pnmfile) as pnm:
            self.assertTrue(numpy.allclose(data, pnm.data), "data are the same")
        if os.path.exists(pnmfile):
            os.unlink(pnmfile)
class TestRaxisImage(unittest.TestCase):
    """Tests for the Rigaku R-Axis (raxis) image reader."""

    def setUp(self):
        """Download the reference image (the .bz2 is unpacked alongside)."""
        self.mar = UtilsTest.getimage("mgzn-20hpt.img.bz2")[:-4]

    def test_read(self):
        """Test the reading of R-Axis images."""
        # BUG FIX (doc): the docstring used to claim "Mar345 images",
        # copy-pasted from the mar345 test module.
        for line in TESTIMAGES.split('\n'):
            vals = line.strip().split()
            name = vals[0]
            logger.debug("Testing file %s" % name)
            dim1, dim2 = [int(x) for x in vals[1:3]]
            mini, maxi, mean, stddev = [float(x) for x in vals[3:]]
            obj = raxisimage()
            obj.read(os.path.join(os.path.dirname(self.mar), name))
            self.assertAlmostEqual(mini, obj.getmin(), 2, "getmin [%s,%s]" % (mini, obj.getmin()))
            self.assertAlmostEqual(maxi, obj.getmax(), 2, "getmax [%s,%s]" % (maxi, obj.getmax()))
            self.assertAlmostEqual(mean, obj.getmean(), 2, "getmean [%s,%s]" % (mean, obj.getmean()))
            self.assertAlmostEqual(stddev, obj.getstddev(), 2, "getstddev [%s,%s]" % (stddev, obj.getstddev()))
            self.assertEqual(dim1, obj.dim1, "dim1")
            self.assertEqual(dim2, obj.dim2, "dim2")
            # the reference image is rectangular, which catches swapped axes
            self.assertNotEqual(obj.dim1, obj.dim2, "dim2!=dim1")

    def _test_write(self):
        """Self-consistency write test at the fabio level.

        Disabled (leading underscore + skipTest): write is not implemented.
        The docstring used to sit unreachably *after* the skipTest call.
        """
        self.skipTest("Write is not implemented")
        for line in TESTIMAGES.split("\n"):
            logger.debug("Processing file: %s" % line)
            vals = line.split()
            name = vals[0]
            obj = raxisimage()
            obj.read(os.path.join(os.path.dirname(self.mar), name))
            obj.write(os.path.join(UtilsTest.tempdir, name))
            other = raxisimage()
            other.read(os.path.join(UtilsTest.tempdir, name))
            self.assertEqual(abs(obj.data - other.data).max(), 0, "data are the same")
            for key in obj.header:
                if key == "filename":
                    continue
                self.assertTrue(key in other.header, "Key %s is in header" % key)
                self.assertEqual(obj.header[key], other.header[key],
                                 "value are the same for key %s: [%s|%s]" % (key, obj.header[key], other.header[key]))
            os.unlink(os.path.join(UtilsTest.tempdir, name))

    def test_memoryleak(self):
        """Open the file many times; only runs the loop in debug mode
        because it is slow."""
        N = 1000
        if logger.getEffectiveLevel() <= logging.INFO:
            logger.debug("Testing for memory leak")
            for i in range(N):
                _img = fabio.open(self.mar)
                print("Reading #%s/%s" % (i, N))
class TestByteOffset(unittest.TestCase):
    """Test the byte-offset compression and decompression."""

    def setUp(self):
        # Reference dataset exercising 8/16/32-bit deltas in both directions.
        self.ds = numpy.array([0, 1, 2, 127, 0, 1, 2, 128,
                               0, 1, 2, 32767, 0, 1, 2, 32768,
                               0, 1, 2, 2147483647, 0, 1, 2, 2147483648,
                               0, 1, 2, 128, 129, 130, 32767, 32768,
                               128, 129, 130, 32768, 2147483647, 2147483648])
        # Expected byte-offset stream for self.ds.
        self.ref = b'\x00\x01\x01}\x81\x01\x01~\x80\x80\xff\x01\x01\x80\xfd\x7f\x80\x01\x80\x01\x01\x80\xfe\x7f\x80\x00\x80\x00\x80\xff\xff\x01\x01\x80\x00\x80\xfd\xff\xff\x7f\x80\x00\x80\x01\x00\x00\x80\x01\x01\x80\x00\x80\xfe\xff\xff\x7f\x80\x00\x80\x00\x00\x00\x80\x00\x00\x00\x80\xff\xff\xff\xff\x01\x01~\x01\x01\x80}\x7f\x01\x80\x80\x80\x01\x01\x80~\x7f\x80\x00\x80\xff\x7f\xff\x7f\x01'

    def tearDown(self):
        unittest.TestCase.tearDown(self)
        self.ds = self.ref = None

    def _check_comp(self, comp, dtype=None):
        """Run the three small reference vectors plus the large dataset
        through compressor *comp*, optionally casting the small inputs
        to *dtype* (the large dataset keeps its native int64 dtype, as
        in the original test).
        """
        def arr(values):
            return numpy.array(values) if dtype is None else numpy.array(values, dtype=dtype)
        self.assertEqual(b"\x00\x80\x80\00", comp(arr([0, 128])), "test +128")
        self.assertEqual(b'\x00\x80\x80\xff', comp(arr([0, -128])), "test -128")
        self.assertEqual(b'\n\x80v\xff', comp(arr([10, -128])), "test +10 -128")
        self.assertEqual(self.ref, comp(self.ds), "test larger")

    def testComp(self):
        """Compression matches the reference streams for the numpy
        implementation and for the cython one with 32- and 64-bit input.
        (Refactored: the original repeated the same four assertions
        three times verbatim.)
        """
        self._check_comp(compression.compByteOffset_numpy)
        self._check_comp(compression.compByteOffset_cython, dtype="int32")
        self._check_comp(compression.compByteOffset_cython, dtype="int64")

    def testSC(self):
        """Datasets are unchanged after compression/decompression
        round-trips through every compressor/decompressor pairing."""
        for comp in (compression.compByteOffset_numpy, compression.compByteOffset_cython):
            stream = comp(self.ds)
            obt_np = compression.decByteOffset_numpy(stream)
            self.assertEqual(abs(self.ds - obt_np).max(), 0.0, "numpy-numpy algo")
            obt_cy = compression.decByteOffset_cython(stream)
            self.assertEqual(abs(self.ds - obt_cy).max(), 0.0, "cython-numpy algo")
            obt_cy2 = compression.decByteOffset_cython(stream, self.ds.size)
            self.assertEqual(abs(self.ds - obt_cy2).max(), 0.0, "cython2-numpy algo_orig")
# """ Test the fit2d mask reader Updated by Jerome Kieffer (jerome.kieffer@esrf.eu), 2011 28/11/2014 """ from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os import numpy if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from fabio.fit2dmaskimage import fit2dmaskimage class TestFaceMask(unittest.TestCase): """ test the picture of a face """ def setUp(self): """ download images """ self.filename = UtilsTest.getimage("face.msk.bz2")[:-4] self.edffilename = UtilsTest.getimage("face.edf.bz2")[:-4] def test_getmatch(self): """ test edf and msk are the same """ i = fit2dmaskimage() i.read(self.filename) j = fabio.open(self.edffilename) # print "edf: dim1",oe.dim1,"dim2",oe.dim2 self.assertEqual(i.dim1, j.dim1) self.assertEqual(i.dim2, j.dim2) self.assertEqual(i.data.shape, j.data.shape) diff = j.data - i.data sumd = abs(diff).sum(dtype=float) self.assertEqual(sumd, 0.0) class TestClickedMask(unittest.TestCase): """ A few random clicks to make a test mask """ def setUp(self): """ download images """ self.filename = UtilsTest.getimage("fit2d_click.msk.bz2")[:-4] self.edffilename = UtilsTest.getimage("fit2d_click.edf.bz2")[:-4] def test_read(self): """ Check it reads a mask OK """ i = fit2dmaskimage() i.read(self.filename) self.assertEqual(i.dim1, 1024) self.assertEqual(i.dim2, 1024) self.assertEqual(i.bpp, 1) self.assertEqual(i.bytecode, numpy.uint8) self.assertEqual(i.data.shape, (1024, 1024)) def test_getmatch(self): """ test edf and msk are the same """ i = fit2dmaskimage() j = fabio.open(self.edffilename) i.read(self.filename) self.assertEqual(i.data.shape, j.data.shape) diff = j.data - i.data self.assertEqual(i.getmax(), 1) self.assertEqual(i.getmin(), 0) sumd = abs(diff).sum(dtype=float) self.assertEqual(sumd, 0) class 
TestMskWrite(unittest.TestCase): """ Write dummy mask files with various compression schemes """ def setUp(self): shape = (199, 211) # those are prime numbers self.data = (numpy.random.random(shape) > 0.6) self.header = fit2dmaskimage.check_header() def atest(self): e = fit2dmaskimage(data=self.data, header=self.header) e.write(self.filename) r = fabio.open(self.filename) self.assertEqual(e.dim1, r.dim1, "dim1 are the same") self.assertEqual(e.dim2, r.dim2, "dim2 are the same") if r.header != self.header: print("Issue with header in TestMskWrite.testFlat") for k, v in r.header.items(): print(k, v, self.header.get(k)) print(e.header) print(r.header) print(self.header) else: self.assertTrue(r.header == self.header, "header are OK") self.assertTrue(abs(r.data - self.data).max() == 0, "data are OK") def testFlat(self): self.filename = os.path.join(UtilsTest.tempdir, "random.msk") self.atest() def testGzip(self): self.filename = os.path.join(UtilsTest.tempdir, "random.msk.gz") self.atest() def testBzip2(self): self.filename = os.path.join(UtilsTest.tempdir, "random.msk.gz") self.atest() def tearDown(self): if os.path.isfile(self.filename): os.unlink(self.filename) def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestFaceMask)) testsuite.addTest(loadTests(TestClickedMask)) testsuite.addTest(loadTests(TestMskWrite)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/test_all.py0000755001611600070440000001045713227357030020144 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General 
Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """Test suite for all fabio modules.""" from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from . import testfabioimage from . import testedfimage from . import testcbfimage from . import testfilenames from . import test_file_series from . import test_filename_steps from . import testadscimage from . import testfit2dmaskimage from . import testGEimage from . import testheadernotsingleton from . import testmar345image from . import testbrukerimage from . import testbruker100image from . import testmccdimage from . import testopenheader from . import testopenimage from . import testOXDimage from . import testkcdimage from . import testtifimage from . import testXSDimage from . import testraxisimage from . import testpnmimage from . import test_flat_binary from . import testnumpyimage from . import testcompression from . import testpilatusimage from . import test_nexus from . import testeigerimage from . import testhdf5image from . import testfit2dimage from . import testspeimage from . import testfabioconvert from . import testjpegimage from . import testjpeg2kimage from . import testmpaimage from . import testdm3image from . import test_failing_files from . 
from . import test_formats


def suite():
    """Aggregate the suite of every fabio test module into one TestSuite."""
    modules = [
        testfabioimage, testedfimage, testcbfimage, testfilenames,
        test_file_series, test_filename_steps, testadscimage,
        testfit2dmaskimage, testGEimage, testheadernotsingleton,
        testmar345image, testbrukerimage, testbruker100image,
        testmccdimage, testopenheader, testopenimage, testOXDimage,
        testkcdimage, testtifimage, testXSDimage, testraxisimage,
        testpnmimage, test_flat_binary, testnumpyimage, testcompression,
        testpilatusimage, test_nexus, testeigerimage, testhdf5image,
        testfit2dimage, testspeimage, testfabioconvert, testjpegimage,
        testjpeg2kimage, testmpaimage, testdm3image, test_failing_files,
        test_formats,
    ]
    testSuite = unittest.TestSuite()
    for module in modules:
        testSuite.addTest(module.suite())
    return testSuite


if __name__ == '__main__':
    runner = unittest.TextTestRunner()
    if not runner.run(suite()).wasSuccessful():
        sys.exit(1)
Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """Test failing files """ from __future__ import print_function, with_statement, division, absolute_import import unittest import os import io import fabio import tempfile import shutil class TestFailingFiles(unittest.TestCase): """Test failing files""" @classmethod def setUpClass(cls): cls.tmp_directory = tempfile.mkdtemp() cls.createResources(cls.tmp_directory) @classmethod def createResources(cls, directory): cls.txt_filename = os.path.join(directory, "test.txt") with io.open(cls.txt_filename, "w+t") as f: f.write(u"Kikoo") cls.bad_edf_filename = os.path.join(directory, "bad_edf.edf") with io.open(cls.bad_edf_filename, "w+b") as f: f.write(b"\r{") f.write(b"\x00\xFF\x99" * 10) cls.bad_edf2_filename = os.path.join(directory, "bad_edf2.edf") with io.open(cls.bad_edf2_filename, "w+b") as f: f.write(b"\n{\n\n}\n") f.write(b"\xFF\x00\x99" * 10) cls.bad_msk_filename = os.path.join(directory, "bad_msk.msk") with io.open(cls.bad_msk_filename, "w+b") as f: f.write(b'M\x00\x00\x00A\x00\x00\x00S\x00\x00\x00K\x00\x00\x00') f.write(b"\x00\xFF\x99" * 10) cls.bad_dm3_filename = os.path.join(directory, "bad_dm3.dm3") with io.open(cls.bad_dm3_filename, "w+b") as f: f.write(b'\x00\x00\x00\x03') 
f.write(b"\x00\xFF\x99" * 10) cls.bad_npy_filename = os.path.join(directory, "bad_numpy.npy") with io.open(cls.bad_npy_filename, "w+b") as f: f.write(b"\x93NUMPY") f.write(b"\x00\xFF\x99" * 10) cls.missing_filename = os.path.join(directory, "test.missing") @classmethod def tearDownClass(cls): shutil.rmtree(cls.tmp_directory) def test_missing_file(self): self.assertRaises(IOError, fabio.open, self.missing_filename) def test_wrong_format(self): self.assertRaises(IOError, fabio.open, self.txt_filename) def test_wrong_edf(self): self.assertRaises(IOError, fabio.open, self.bad_edf_filename) def test_wrong_edf2(self): self.assertRaises(IOError, fabio.open, self.bad_edf_filename) def test_wrong_msk(self): self.assertRaises(ValueError, fabio.open, self.bad_msk_filename) def test_wrong_dm3(self): self.assertRaises(ValueError, fabio.open, self.bad_dm3_filename) def test_wrong_numpy(self): self.assertRaises(ValueError, fabio.open, self.bad_npy_filename) def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestFailingFiles)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/testbruker100image.py0000644001611600070440000000777513227357030021761 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """ #bruker100 Unit tests 19/01/2015 """ from __future__ import print_function, with_statement, division, absolute_import import unittest import os if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) from fabio.bruker100image import Bruker100Image from fabio.openimage import openimage # filename dim1 dim2 min max mean stddev TESTIMAGES = """NaCl_10_01_0009.sfrm 512 512 -30 5912 34.4626 26.189 NaCl_10_01_0009.sfrm.gz 512 512 -30 5912 34.4626 26.189 NaCl_10_01_0009.sfrm.bz2 512 512 -30 5912 34.4626 26.189""" REFIMAGE = "NaCl_10_01_0009.npy.bz2" class TestBruker100(unittest.TestCase): """ check some read data from bruker version100 detector""" def setUp(self): """ download images """ UtilsTest.getimage(REFIMAGE) self.im_dir = os.path.dirname(UtilsTest.getimage(TESTIMAGES.split()[0] + ".bz2")) def test_read(self): """ check we can read bruker100 images""" for line in TESTIMAGES.split("\n"): vals = line.split() name = vals[0] dim1, dim2 = [int(x) for x in vals[1:3]] mini, maxi, mean, stddev = [float(x) for x in vals[3:]] obj = Bruker100Image() obj.read(os.path.join(self.im_dir, name)) self.assertAlmostEqual(mini, obj.getmin(), 2, "getmin") self.assertAlmostEqual(maxi, obj.getmax(), 2, "getmax") self.assertAlmostEqual(mean, obj.getmean(), 2, "getmean") self.assertAlmostEqual(stddev, obj.getstddev(), 2, "getstddev") self.assertEqual(dim1, obj.dim1, "dim1") self.assertEqual(dim2, obj.dim2, "dim2") def test_same(self): """ check we can read bruker100 images""" ref = 
openimage(os.path.join(self.im_dir, REFIMAGE)) for line in TESTIMAGES.split("\n"): obt = openimage(os.path.join(self.im_dir, line.split()[0])) self.assertTrue(abs(ref.data - obt.data).max() == 0, "data are the same") def test_write(self): fname = TESTIMAGES.split()[0] obt = openimage(os.path.join(self.im_dir, fname)) name = os.path.basename(fname) obj = Bruker100Image(data=obt.data, header=obt.header) obj.write(os.path.join(UtilsTest.tempdir, name)) other = openimage(os.path.join(UtilsTest.tempdir, name)) self.assertEqual(abs(obt.data - other.data).max(), 0, "data are the same") for key in obt.header: self.assertTrue(key in other.header, "Key %s is in header" % key) self.assertEqual(obt.header[key], other.header[key], "value are the same for key %s" % key) os.unlink(os.path.join(UtilsTest.tempdir, name)) def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestBruker100)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/testcbfimage.py0000755001611600070440000001611713227357030020771 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """ 2011: Jerome Kieffer for ESRF. Unit tests for CBF images based on references images taken from: http://pilatus.web.psi.ch/DATA/DATASETS/insulin_0.2/ 19/01/2015 """ from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os import time if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from fabio.cbfimage import cbfimage from fabio.compression import decByteOffset_numpy, decByteOffset_cython from fabio.third_party.six import PY3 if PY3: from fabio.fabioutils import unicode class TestCbfReader(unittest.TestCase): """ test cbf image reader """ def __init__(self, methodName): "Constructor of the class" unittest.TestCase.__init__(self, methodName) self.edf_filename = os.path.join(UtilsTest.image_home, "run2_1_00148.edf") self.cbf_filename = os.path.join(UtilsTest.image_home, "run2_1_00148.cbf") def setUp(self): """Download images""" UtilsTest.getimage(os.path.basename(self.edf_filename + ".bz2")) UtilsTest.getimage(os.path.basename(self.cbf_filename + ".bz2")) def test_read(self): """ check whole reader""" times = [] times.append(time.time()) cbf = fabio.open(self.cbf_filename) times.append(time.time()) edf = fabio.open(self.edf_filename) times.append(time.time()) self.assertAlmostEqual(0, abs(cbf.data - edf.data).max()) logger.info("Reading CBF took %.3fs whereas the same EDF took %.3fs" % (times[1] - times[0], times[2] - times[1])) def test_write(self): "Rest writing with self consistency at the fabio level" name = os.path.basename(self.cbf_filename) obj = cbfimage() obj.read(self.cbf_filename) obj.write(os.path.join(UtilsTest.tempdir, name)) other = cbfimage() other.read(os.path.join(UtilsTest.tempdir, name)) 
self.assertEqual(abs(obj.data - other.data).max(), 0, "data are the same") for key in obj.header: if key in["filename", "X-Binary-Size-Padding"]: continue self.assertTrue(key in other.header, "Key %s is in header" % key) self.assertEqual(obj.header[key], other.header[key], "value are the same for key %s" % key) # By destroying the object, one actually closes the file, which is needed under windows. del obj del other if os.path.exists(os.path.join(UtilsTest.tempdir, name)): os.unlink(os.path.join(UtilsTest.tempdir, name)) def test_byte_offset(self): """ check byte offset algorithm""" cbf = fabio.open(self.cbf_filename) starter = b"\x0c\x1a\x04\xd5" cbs = cbf.cbs startPos = cbs.find(starter) + 4 data = cbs[startPos: startPos + int(cbf.header["X-Binary-Size"])] startTime = time.time() numpyRes = decByteOffset_numpy(data, size=cbf.dim1 * cbf.dim2) tNumpy = time.time() - startTime logger.info("Timing for Numpy method : %.3fs" % tNumpy) startTime = time.time() cythonRes = decByteOffset_cython(stream=data, size=cbf.dim1 * cbf.dim2) tCython = time.time() - startTime delta = abs(numpyRes - cythonRes).max() self.assertAlmostEqual(0, delta) logger.info("Timing for Cython method : %.3fs, max delta= %s" % (tCython, delta)) def test_consitency_manual(self): """ Test if an image can be read and saved and the results are "similar" """ name = os.path.basename(self.cbf_filename) obj = fabio.open(self.cbf_filename) new = fabio.cbfimage.cbfimage(data=obj.data, header=obj.header) new.write(os.path.join(UtilsTest.tempdir, name)) other = fabio.open(os.path.join(UtilsTest.tempdir, name)) self.assertEqual(abs(obj.data - other.data).max(), 0, "data are the same") for key in obj.header: if key in["filename", "X-Binary-Size-Padding"]: continue self.assertTrue(key in other.header, "Key %s is in header" % key) self.assertEqual(obj.header[key], other.header[key], "value are the same for key %s [%s|%s]" % (key, obj.header[key], other.header[key])) def test_consitency_convert(self): """ Test if an 
image can be read and saved and the results are "similar" """ name = os.path.basename(self.cbf_filename) obj = fabio.open(self.cbf_filename) new = obj.convert("cbf") new.write(os.path.join(UtilsTest.tempdir, name)) other = fabio.open(os.path.join(UtilsTest.tempdir, name)) self.assertEqual(abs(obj.data - other.data).max(), 0, "data are the same") for key in obj.header: if key in["filename", "X-Binary-Size-Padding"]: continue self.assertTrue(key in other.header, "Key %s is in header" % key) self.assertEqual(obj.header[key], other.header[key], "value are the same for key %s [%s|%s]" % (key, obj.header[key], other.header[key])) def test_unicode(self): """ Test if an image can be read and saved to an unicode named """ name = unicode(os.path.basename(self.cbf_filename)) obj = fabio.open(self.cbf_filename) obj.write(os.path.join(UtilsTest.tempdir, name)) other = fabio.open(os.path.join(UtilsTest.tempdir, name)) self.assertEqual(abs(obj.data - other.data).max(), 0, "data are the same") for key in obj.header: if key in["filename", "X-Binary-Size-Padding"]: continue self.assertTrue(key in other.header, "Key %s is in header" % key) self.assertEqual(obj.header[key], other.header[key], "value are the same for key %s [%s|%s]" % (key, obj.header[key], other.header[key])) def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestCbfReader)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/testedfimage.py0000755001611600070440000005165113227357030020777 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of 
the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """ # Unit tests # builds on stuff from ImageD11.test.testpeaksearch 28/11/2014 """ from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os import numpy import tempfile import shutil import io import fabio.edfimage if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from ..edfimage import edfimage from ..third_party import six from ..fabioutils import GzipFile, BZ2File class TestFlatEdfs(unittest.TestCase): """ test some flat images """ def common_setup(self): self.BYTE_ORDER = "LowByteFirst" if numpy.little_endian else "HighByteFirst" self.MYHEADER = six.b("{\n%-1020s}\n" % ( """Omega = 0.0 ; Dim_1 = 256 ; Dim_2 = 256 ; DataType = FloatValue ; ByteOrder = %s ; Image = 1; History-1 = something=something else; \n\n""" % self.BYTE_ORDER)) self.MYIMAGE = numpy.ones((256, 256), numpy.float32) * 10 self.MYIMAGE[0, 0] = 0 self.MYIMAGE[1, 1] = 20 assert len(self.MYIMAGE[0:1, 0:1].tostring()) == 4, self.MYIMAGE[0:1, 0:1].tostring() def setUp(self): """ initialize""" self.common_setup() self.filename = os.path.join(UtilsTest.tempdir, "im0000.edf") if not os.path.isfile(self.filename): outf = open(self.filename, "wb") assert len(self.MYHEADER) % 1024 == 0 outf.write(self.MYHEADER) outf.write(self.MYIMAGE.tostring()) outf.close() def tearDown(self): 
unittest.TestCase.tearDown(self) self.BYTE_ORDER = self.MYHEADER = self.MYIMAGE = None def test_read(self): """ check readable""" obj = edfimage() obj.read(self.filename) self.assertEqual(obj.dim1, 256, msg="dim1!=256 for file: %s" % self.filename) self.assertEqual(obj.dim2, 256, msg="dim2!=256 for file: %s" % self.filename) self.assertEqual(obj.bpp, 4, msg="bpp!=4 for file: %s" % self.filename) self.assertEqual(obj.bytecode, numpy.float32, msg="bytecode!=flot32 for file: %s" % self.filename) self.assertEqual(obj.data.shape, (256, 256), msg="shape!=(256,256) for file: %s" % self.filename) self.assertEqual(obj.header['History-1'], "something=something else") def test_getstats(self): """ test statistics""" obj = edfimage() obj.read(self.filename) self.assertEqual(obj.getmean(), 10) self.assertEqual(obj.getmin(), 0) self.assertEqual(obj.getmax(), 20) class TestBzipEdf(TestFlatEdfs): """ same for bzipped versions """ def setUp(self): """set it up""" TestFlatEdfs.setUp(self) if not os.path.isfile(self.filename + ".bz2"): with BZ2File(self.filename + ".bz2", "wb") as f: with open(self.filename, "rb") as d: f.write(d.read()) self.filename += ".bz2" class TestGzipEdf(TestFlatEdfs): """ same for gzipped versions """ def setUp(self): """ set it up """ TestFlatEdfs.setUp(self) if not os.path.isfile(self.filename + ".gz"): with GzipFile(self.filename + ".gz", "wb") as f: with open(self.filename, "rb") as d: f.write(d.read()) self.filename += ".gz" # statistics come from fit2d I think # filename dim1 dim2 min max mean stddev TESTIMAGES = """F2K_Seb_Lyso0675.edf 2048 2048 982 17467 1504.29 217.61 F2K_Seb_Lyso0675.edf.bz2 2048 2048 982 17467 1504.29 217.61 F2K_Seb_Lyso0675.edf.gz 2048 2048 982 17467 1504.29 217.61 id13_badPadding.edf 512 512 85 61947 275.62 583.44 """ class TestEdfs(unittest.TestCase): """ Read some test images """ def setUp(self): self.im_dir = os.path.dirname(UtilsTest.getimage("F2K_Seb_Lyso0675.edf.bz2")) UtilsTest.getimage("id13_badPadding.edf.bz2") def 
test_read(self): """ check we can read these images""" for line in TESTIMAGES.split("\n"): vals = line.split() name = vals[0] dim1, dim2 = [int(x) for x in vals[1:3]] mini, maxi, mean, stddev = [float(x) for x in vals[3:]] obj = edfimage() try: obj.read(os.path.join(self.im_dir, name)) except: print("Cannot read image", name) raise self.assertAlmostEqual(mini, obj.getmin(), 2, "testedfs: %s getmin()" % name) self.assertAlmostEqual(maxi, obj.getmax(), 2, "testedfs: %s getmax" % name) logger.info("%s Mean: exp=%s, obt=%s" % (name, mean, obj.getmean())) self.assertAlmostEqual(mean, obj.getmean(), 2, "testedfs: %s getmean" % name) logger.info("%s StdDev: exp=%s, obt=%s" % (name, stddev, obj.getstddev())) self.assertAlmostEqual(stddev, obj.getstddev(), 2, "testedfs: %s getstddev" % name) self.assertEqual(dim1, obj.dim1, "testedfs: %s dim1" % name) self.assertEqual(dim2, obj.dim2, "testedfs: %s dim2" % name) obj = None def test_rebin(self): """test the rebin of edfdata""" f = edfimage() f.read(os.path.join(self.im_dir, "F2K_Seb_Lyso0675.edf")) f.rebin(1024, 1024) self.assertEqual(abs(numpy.array([[1547, 1439], [1536, 1494]]) - f.data).max(), 0, "data are the same after rebin") def tearDown(self): unittest.TestCase.tearDown(self) self.im_dir = None class TestEdfCompressedData(unittest.TestCase): """ Read some test images with their data-block compressed. 
class TestEdfCompressedData(unittest.TestCase):
    """
    Read some test images with their data-block compressed.
    Z-Compression and Gzip compression are implemented
    Bzip2 and byte offset are experimental
    """

    def setUp(self):
        self.im_dir = os.path.dirname(UtilsTest.getimage("edfGzip_U16.edf.bz2"))
        UtilsTest.getimage("edfCompressed_U16.edf.bz2")
        UtilsTest.getimage("edfUncompressed_U16.edf.bz2")

    def test_read(self):
        """ check we can read these images"""
        ref = edfimage()
        gzipped = edfimage()
        compressed = edfimage()
        refFile = "edfUncompressed_U16.edf"
        gzippedFile = "edfGzip_U16.edf"
        compressedFile = "edfCompressed_U16.edf"
        ref.read(os.path.join(self.im_dir, refFile))
        gzipped.read(os.path.join(self.im_dir, gzippedFile))
        compressed.read(os.path.join(self.im_dir, compressedFile))
        self.assertEqual((ref.data - gzipped.data).max(), 0, "Gzipped data block is correct")
        self.assertEqual((ref.data - compressed.data).max(), 0, "Zlib compressed data block is correct")


class TestEdfMultiFrame(unittest.TestCase):
    """Navigate between the frames of a multi-frame EDF file and compare
    each frame against single-frame reference files."""

    def setUp(self):
        self.multiFrameFilename = UtilsTest.getimage("MultiFrame.edf.bz2")[:-4]
        self.Frame0Filename = UtilsTest.getimage("MultiFrame-Frame0.edf.bz2")[:-4]
        self.Frame1Filename = UtilsTest.getimage("MultiFrame-Frame1.edf.bz2")[:-4]
        self.ref = edfimage()
        self.frame0 = edfimage()
        self.frame1 = edfimage()
        self.ref.read(self.multiFrameFilename)
        self.frame0.read(self.Frame0Filename)
        self.frame1.read(self.Frame1Filename)

    def tearDown(self):
        unittest.TestCase.tearDown(self)
        self.multiFrameFilename = self.Frame0Filename = self.Frame1Filename = self.ref = self.frame0 = self.frame1 = None

    def test_getFrame_multi(self):
        self.assertEqual((self.ref.data - self.frame0.data).max(), 0, "getFrame_multi: Same data for frame 0")
        f1_multi = self.ref.getframe(1)
        # logger.warning("f1_multi.header=%s\nf1_multi.data= %s" % (f1_multi.header, f1_multi.data))
        self.assertEqual((f1_multi.data - self.frame1.data).max(), 0, "getFrame_multi: Same data for frame 1")

    def test_getFrame_mono(self):
        self.assertEqual((self.ref.data - self.frame0.data).max(), 0, "getFrame_mono: Same data for frame 0")
        f1_mono = self.frame0.getframe(1)
        self.assertEqual((f1_mono.data - self.frame1.data).max(), 0, "getFrame_mono: Same data for frame 1")

    def test_next_multi(self):
        self.assertEqual((self.ref.data - self.frame0.data).max(), 0, "next_multi: Same data for frame 0")
        next_ = self.ref.next()
        self.assertEqual((next_.data - self.frame1.data).max(), 0, "next_multi: Same data for frame 1")

    def test_next_mono(self):
        # BUG FIX: this method was named "text_next_mono", so unittest
        # never collected it and the mono next() path was untested.
        self.assertEqual((self.ref.data - self.frame0.data).max(), 0, "next_mono: Same data for frame 0")
        next_ = self.frame0.next()
        self.assertEqual((next_.data - self.frame1.data).max(), 0, "next_mono: Same data for frame 1")

    def test_previous_multi(self):
        f1 = self.ref.getframe(1)
        self.assertEqual((f1.data - self.frame1.data).max(), 0, "previous_multi: Same data for frame 1")
        f0 = f1.previous()
        # BUG FIX: f0 was compared against frame1 although the assertion
        # message (and the intent of previous()) refers to frame 0.
        self.assertEqual((f0.data - self.frame0.data).max(), 0, "previous_multi: Same data for frame 0")

    def test_previous_mono(self):
        f1 = self.ref.getframe(1)
        self.assertEqual((f1.data - self.frame1.data).max(), 0, "previous_mono: Same data for frame 1")
        prev = self.frame1.previous()
        self.assertEqual((prev.data - self.frame0.data).max(), 0, "previous_mono: Same data for frame 0")

    def test_openimage_multiframes(self):
        "test if openimage can directly read first or second frame of a multi-frame"
        self.assertEqual((fabio.open(self.multiFrameFilename).data - self.frame0.data).max(), 0,
                         "openimage_multiframes: Same data for default ")
        # print(fabio.open(self.multiFrameFilename, 0).data)
        self.assertEqual((fabio.open(self.multiFrameFilename, 0).data - self.frame0.data).max(), 0,
                         "openimage_multiframes: Same data for frame 0")
        self.assertEqual((fabio.open(self.multiFrameFilename, 1).data - self.frame1.data).max(), 0,
                         "openimage_multiframes: Same data for frame 1")
class TestEdfFastRead(unittest.TestCase):
    """Check fastReadData returns the same pixels as a full open()."""

    def setUp(self):
        self.refFilename = UtilsTest.getimage("MultiFrame-Frame0.edf.bz2")
        self.fastFilename = self.refFilename[:-4]

    def test_fastread(self):
        ref = fabio.open(self.refFilename)
        refdata = ref.data
        obt = ref.fastReadData(self.fastFilename)
        self.assertEqual(abs(obt - refdata).max(), 0, "testedffastread: Same data")


class TestEdfWrite(unittest.TestCase):
    """ Write dummy edf files with various compression schemes """
    tmpdir = UtilsTest.tempdir

    def setUp(self):
        self.data = numpy.arange(100).reshape((10, 10))
        self.header = {"toto": "tutu"}

    def testFlat(self):
        self.filename = os.path.join(self.tmpdir, "merged.azim")
        e = edfimage(data=self.data, header=self.header)
        e.write(self.filename)
        r = fabio.open(self.filename)
        self.assertTrue(r.header["toto"] == self.header["toto"], "header are OK")
        self.assertTrue(abs(r.data - self.data).max() == 0, "data are OK")
        self.assertEqual(int(r.header["EDF_HeaderSize"]), 512, "header size is one 512 block")

    def testGzip(self):
        self.filename = os.path.join(self.tmpdir, "merged.azim.gz")
        e = edfimage(data=self.data, header=self.header)
        e.write(self.filename)
        r = fabio.open(self.filename)
        self.assertTrue(r.header["toto"] == self.header["toto"], "header are OK")
        self.assertTrue(abs(r.data - self.data).max() == 0, "data are OK")
        self.assertEqual(int(r.header["EDF_HeaderSize"]), 512, "header size is one 512 block")

    def testBzip2(self):
        # BUG FIX: previously used "merged.azim.gz", which re-tested the
        # gzip writer and left the bzip2 code path untested.
        self.filename = os.path.join(self.tmpdir, "merged.azim.bz2")
        e = edfimage(data=self.data, header=self.header)
        e.write(self.filename)
        r = fabio.open(self.filename)
        self.assertTrue(r.header["toto"] == self.header["toto"], "header are OK")
        self.assertTrue(abs(r.data - self.data).max() == 0, "data are OK")
        self.assertEqual(int(r.header["EDF_HeaderSize"]), 512, "header size is one 512 block")

    def tearDown(self):
        os.unlink(self.filename)


class TestEdfRegression(unittest.TestCase):
    """ Test suite to prevent regression """

    # NOTE(review): the method name lacks the "test_" prefix, so unittest
    # does not collect it — looks deliberate (disabled), confirm upstream.
    def bug_27(self):
        """
        import fabio
        obj = fabio.open("any.edf")
        obj.header["missing"]="blah"
        obj.write("any.edf")
        """
        # create dummy image:
        shape = (32, 32)
        data = numpy.random.randint(0, 6500, size=shape[0] * shape[1]).astype("uint16").reshape(shape)
        fname = os.path.join(UtilsTest.tempdir, "bug27.edf")
        e = edfimage(data=data, header={"key1": "value1"})
        e.write(fname)
        del e

        obj = fabio.open(fname)
        obj.header["missing"] = "blah"
        obj.write(fname)
        del obj


class TestBadFiles(unittest.TestCase):
    """Truncate a two-frame EDF at various byte offsets and check the
    reader degrades gracefully (partial frames, incomplete_file flags)."""

    filename_template = "%s.edf"

    @classmethod
    def setUpClass(cls):
        cls.tmp_directory = tempfile.mkdtemp(prefix=cls.__name__)
        cls.create_resources()

    @classmethod
    def tearDownClass(cls):
        return shutil.rmtree(cls.tmp_directory)

    @classmethod
    def create_resources(cls):
        """Write the reference two-frame file and record the byte offset
        reached after each header and each data block."""
        filename = os.path.join(cls.tmp_directory, cls.filename_template % "base")
        cls.base_filename = filename
        with io.open(filename, "wb") as fd:
            cls.write_header(fd, 1)
            cls.header1 = fd.tell()
            cls.write_data(fd)
            cls.data1 = fd.tell()
            cls.write_header(fd, 2)
            cls.header2 = fd.tell()
            cls.write_data(fd)
            cls.data2 = fd.tell()

    @classmethod
    def write_header(cls, fd, image_number):
        byte_order = "LowByteFirst" if numpy.little_endian else "HighByteFirst"
        # BUG FIX: byte_order was converted to bytes *before* %s-formatting,
        # which under Python 3 wrote "ByteOrder = b'LowByteFirst' ;" into
        # the header; format the str first, encode the whole line after.
        fd.write(six.b("{\n"))
        fd.write(six.b("Omega = 0.0 ;\n"))
        fd.write(six.b("Dim_1 = 256 ;\n"))
        fd.write(six.b("Dim_2 = 256 ;\n"))
        fd.write(six.b("DataType = FloatValue ;\n"))
        fd.write(six.b("ByteOrder = %s ;\n" % byte_order))
        fd.write(six.b("Image = %d ;\n" % image_number))
        fd.write(six.b("History-1 = something=something else;\n"))
        fd.write(six.b("}\n"))

    @classmethod
    def write_data(cls, fd):
        data = numpy.ones((256, 256), numpy.float32) * 10
        data[0, 0] = 0
        data[1, 1] = 20
        fd.write(data.tostring())

    @classmethod
    def copy_base(cls, filename, size):
        """Copy the first *size* bytes of the reference file to *filename*."""
        with io.open(cls.base_filename, "rb") as fd_base:
            with io.open(filename, "wb") as fd_result:
                fd_result.write(fd_base.read(size))

    @classmethod
    def open(cls, filename):
        image = fabio.edfimage.EdfImage()
        image.read(filename)
        return image

    def test_base(self):
        filename = os.path.join(self.tmp_directory, self.filename_template % str(self.id()))
        size = self.data2
        self.copy_base(filename, size)
        image = self.open(filename)
        self.assertEqual(image.nframes, 2)
        frame = image.getframe(0)
        self.assertEqual(frame.header["Image"], "1")
        self.assertEqual(frame.data[-1].sum(), 2560)
        frame = image.getframe(1)
        self.assertEqual(frame.header["Image"], "2")
        self.assertEqual(frame.data[-1].sum(), 2560)

    def test_empty(self):
        filename = os.path.join(self.tmp_directory, self.filename_template % str(self.id()))
        f = io.open(filename, "wb")
        f.close()
        self.assertRaises(IOError, self.open, filename)

    def test_wrong_magic(self):
        filename = os.path.join(self.tmp_directory, self.filename_template % str(self.id()))
        f = io.open(filename, "wb")
        f.write(six.b("\x10\x20\x30"))
        f.close()
        self.assertRaises(IOError, self.open, filename)

    def test_half_header(self):
        filename = os.path.join(self.tmp_directory, self.filename_template % str(self.id()))
        size = self.header1 // 2
        self.copy_base(filename, size)
        self.assertRaises(IOError, self.open, filename)

    def test_header_with_no_data(self):
        filename = os.path.join(self.tmp_directory, self.filename_template % str(self.id()))
        size = self.header1
        self.copy_base(filename, size)
        image = self.open(filename)
        self.assertIn(image.nframes, [0, 1])
        self.assertTrue(image.incomplete_file)

    def test_header_with_half_data(self):
        filename = os.path.join(self.tmp_directory, self.filename_template % str(self.id()))
        size = (self.header1 + self.data1) // 2
        self.copy_base(filename, size)
        image = self.open(filename)
        self.assertEqual(image.nframes, 1)
        self.assertTrue(image.incomplete_file)
        frame = image
        self.assertEqual(frame.header["Image"], "1")
        self.assertEqual(frame.data[-1].sum(), 0)
        self.assertTrue(frame.incomplete_data)

    def test_full_frame_plus_half_header(self):
        filename = os.path.join(self.tmp_directory, self.filename_template % str(self.id()))
        size = (self.data1 + self.header2) // 2
        self.copy_base(filename, size)
        image = self.open(filename)
        self.assertEqual(image.nframes, 1)
        self.assertTrue(image.incomplete_file)
        frame = image
        self.assertEqual(frame.header["Image"], "1")
        self.assertEqual(frame.data[-1].sum(), 2560)
        self.assertFalse(frame.incomplete_data)

    def test_full_frame_plus_header_with_no_data(self):
        filename = os.path.join(self.tmp_directory, self.filename_template % str(self.id()))
        size = self.header2
        self.copy_base(filename, size)
        image = self.open(filename)
        self.assertIn(image.nframes, [1, 2])
        self.assertTrue(image.incomplete_file)
        frame = image
        self.assertEqual(frame.header["Image"], "1")
        self.assertEqual(frame.data[-1].sum(), 2560)
        self.assertFalse(frame.incomplete_data)

    def test_full_frame_plus_header_with_half_data(self):
        filename = os.path.join(self.tmp_directory, self.filename_template % str(self.id()))
        size = (self.header2 + self.data2) // 2
        self.copy_base(filename, size)
        image = self.open(filename)
        self.assertEqual(image.nframes, 2)
        self.assertTrue(image.incomplete_file)
        frame = image.getframe(0)
        self.assertEqual(frame.header["Image"], "1")
        self.assertEqual(frame.data[-1].sum(), 2560)
        self.assertFalse(frame.incomplete_data)
        frame = image.getframe(1)
        self.assertEqual(frame.header["Image"], "2")
        self.assertEqual(frame.data[-1].sum(), 0)
        self.assertTrue(frame.incomplete_data)


class TestBadGzFiles(TestBadFiles):
    """Same truncation scenarios, with each header/data chunk written as a
    separate gzip member."""

    filename_template = "%s.edf.gz"

    @classmethod
    def write_header(cls, fd, image_number):
        with GzipFile(fileobj=fd, mode="wb") as gzfd:
            TestBadFiles.write_header(gzfd, image_number)

    @classmethod
    def write_data(cls, fd):
        with GzipFile(fileobj=fd, mode="wb") as gzfd:
            TestBadFiles.write_data(gzfd)


def suite():
    loadTests = unittest.defaultTestLoader.loadTestsFromTestCase
    testsuite = unittest.TestSuite()
    testsuite.addTest(loadTests(TestFlatEdfs))
    testsuite.addTest(loadTests(TestBzipEdf))
    testsuite.addTest(loadTests(TestGzipEdf))
    testsuite.addTest(loadTests(TestEdfs))
    testsuite.addTest(loadTests(TestEdfCompressedData))
    testsuite.addTest(loadTests(TestEdfMultiFrame))
    testsuite.addTest(loadTests(TestEdfFastRead))
    testsuite.addTest(loadTests(TestEdfWrite))
    testsuite.addTest(loadTests(TestEdfRegression))
    testsuite.addTest(loadTests(TestBadFiles))
    testsuite.addTest(loadTests(TestBadGzFiles))
    return testsuite


if __name__ == '__main__':
    runner = unittest.TextTestRunner()
    runner.run(suite())
# """ # Unit tests Jerome Kieffer, 04/12/2014 """ from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from fabio.openimage import openheader class Test1(unittest.TestCase): """openheader opening edf""" def setUp(self): self.name = UtilsTest.getimage("F2K_Seb_Lyso0675_header_only.edf.bz2")[:-4] def testcase(self): """ check openheader can read edf headers""" for ext in ["", ".bz2", ".gz"]: name = self.name + ext obj = openheader(name) logger.debug(" %s obj = %s" % (name, obj.header)) self.assertEqual(obj.header["title"], "ESPIA FRELON Image", "Error on file %s" % name) def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(Test1)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite) fabio-0.6.0/fabio/test/testmpaimage.py0000644001611600070440000000525113227357030021006 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """Multiwire Unit tests""" from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os from .utilstest import UtilsTest if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] class TestMpa(unittest.TestCase): """ Test classe for multiwire (mpa) images """ TESTIMAGES = [ # filename dim1 dim2 min max mean stddev ("mpa_test.mpa", 1024, 1024, 0, 1295, 0.8590, 18.9393), ] def test_read(self): """ Test the reading of multiwire images """ for imageData in self.TESTIMAGES: name, dim1, dim2, mini, maxi, mean, stddev = imageData logger.debug("Processing: %s" % name) path = UtilsTest.getimage(name + ".bz2")[:-4] obj = fabio.mpaimage.MpaImage() obj.read(path) self.assertAlmostEqual(mini, obj.getmin(), 2, "getmin [%s,%s]" % (mini, obj.getmin())) self.assertAlmostEqual(maxi, obj.getmax(), 2, "getmax [%s,%s]" % (maxi, obj.getmax())) self.assertAlmostEqual(mean, obj.getmean(), 2, "getmean [%s,%s]" % (mean, obj.getmean())) self.assertAlmostEqual(stddev, obj.getstddev(), 2, "getstddev [%s,%s]" % (stddev, obj.getstddev())) self.assertEqual(dim1, obj.dim1, "dim1") self.assertEqual(dim2, obj.dim2, "dim2") def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestMpa)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/testeigerimage.py0000644001611600070440000000502113227357030021317 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer 
(Jerome.Kieffer@ESRF.eu) # """Test Eiger images """ from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from fabio.openimage import openimage from fabio.eigerimage import EigerImage, h5py def make_hdf5(name, shape=(50, 99, 101)): if h5py is None: raise unittest.SkipTest("h5py is not available") with h5py.File(name) as h: e = h.require_group("entry/data") if len(shape) == 2: e.require_dataset("data", shape, compression="gzip", compression_opts=9, dtype="float32") elif len(shape) == 3: e.require_dataset("data", shape, chunks=(1,) + shape[1:], compression="gzip", compression_opts=9, dtype="float32") class TestEiger(unittest.TestCase): """basic test""" @classmethod def setUpClass(cls): super(TestEiger, cls).setUpClass() cls.fn3 = os.path.join(UtilsTest.tempdir, "eiger3d.h5") make_hdf5(cls.fn3, (50, 99, 101)) @classmethod def tearDownClass(cls): super(TestEiger, cls).tearDownClass() if os.path.exists(cls.fn3): os.unlink(cls.fn3) def test_read(self): """ check we can read images from Eiger""" e = EigerImage() e.read(self.fn3) self.assertEqual(e.dim1, 101, "dim1 OK") self.assertEqual(e.dim2, 99, "dim2 OK") self.assertEqual(e.nframes, 50, "nframe: got %s!=50" % e.nframes) self.assertEqual(e.bpp, 4, "bpp OK") def test_open(self): """ check we can read images from Eiger""" e = openimage(self.fn3) self.assertEqual(e.dim1, 101, "dim1 OK") self.assertEqual(e.dim2, 99, "dim2 OK") self.assertEqual(e.nframes, 50, "nframe: got %s!=50" % e.nframes) self.assertEqual(e.bpp, 4, "bpp OK") def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestEiger)) return testsuite if __name__ == '__main__': runner = 
unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/testjpegimage.py0000644001611600070440000001153213227357030021155 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """ Test JPEG format """ from __future__ import print_function, with_statement, division, absolute_import import unittest import os import sys import tempfile import shutil from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from .. 
import jpegimage TEST_DIRECTORY = None # Temporary directory where storing test data def setUpModule(): global TEST_DIRECTORY TEST_DIRECTORY = tempfile.mkdtemp(prefix="%s_data_" % __name__) def tearDownModule(): shutil.rmtree(TEST_DIRECTORY) class TestJpegImage(unittest.TestCase): """Test the class format""" def setUp(self): if jpegimage.Image is None: self.skipTest("PIL is not available") def test_read_uint8(self): filename = UtilsTest.getimage("rand_uint8.jpg.bz2")[:-4] image_format = jpegimage.JpegImage() image = image_format.read(filename) self.assertEqual(image.data.shape, (64, 64)) self.assertIn("jfif", image.header) def test_read_failing_file(self): filename = os.path.join(TEST_DIRECTORY, "2.jpg") filename_source = UtilsTest.getimage("rand_uint8.jpg.bz2")[:-4] with open(filename_source, "r+b") as fsource: with open(filename, "w+b") as ftest: ftest.write(fsource.read()) ftest.seek(1) ftest.write(b".") image_format = jpegimage.JpegImage() try: _image = image_format.read(filename) self.fail() except IOError: pass def test_read_empty_file(self): filename = os.path.join(TEST_DIRECTORY, "3.jpg") f = open(filename, "wb") f.close() image_format = jpegimage.JpegImage() try: _image = image_format.read(filename) self.fail() except IOError: pass def test_read_missing_file(self): filename = os.path.join(TEST_DIRECTORY, "4.jpg") image_format = jpegimage.JpegImage() try: _image = image_format.read(filename) self.fail() except IOError: pass class TestPilNotAvailable(unittest.TestCase): def setUp(self): filename = UtilsTest.getimage("rand_uint8.jpg.bz2")[:-4] self.filename = filename self.old = jpegimage.Image def tearDown(self): jpegimage.Image = self.old self.filename = None self.data = None def open_image(self): return fabio.open(self.filename) def test_with_pil(self): if jpegimage.Image is None: self.skipTest("PIL is not available") image = self.open_image() self.assertIsInstance(image, jpegimage.JpegImage) self.assertEqual(image.data.shape, (64, 64)) 
self.assertIn("jfif", image.header) def test_without_pil(self): try: old = jpegimage.Image jpegimage.Image = None try: _image = self.open_image() self.fail() except IOError: pass finally: jpegimage.Image = old class TestJpegImageInsideFabio(unittest.TestCase): """Test the format inside the fabio framework""" def test_read_uint8(self): if jpegimage.Image is None: self.skipTest("PIL is not available") filename = UtilsTest.getimage("rand_uint8.jpg.bz2")[:-4] image = fabio.open(filename) self.assertIsInstance(image, jpegimage.JpegImage) self.assertEqual(image.data.shape, (64, 64)) self.assertIn("jfif", image.header) def suite(): loader = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loader(TestJpegImage)) testsuite.addTest(loader(TestJpegImageInsideFabio)) testsuite.addTest(loader(TestPilNotAvailable)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/testfabioimage.py0000644001611600070440000001714313227357030021314 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# """ Test cases for the fabioimage class testsuite by Jerome Kieffer (Jerome.Kieffer@esrf.eu) 28/11/2014 """ from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os import numpy if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from ..fabioimage import fabioimage from .. import fabioutils from ..utils import pilutils class Test50000(unittest.TestCase): """ test with 50000 everywhere""" def setUp(self): """make the image""" dat = numpy.ones((1024, 1024), numpy.uint16) dat = (dat * 50000).astype(numpy.uint16) assert dat.dtype.char == numpy.ones((1), numpy.uint16).dtype.char hed = {"Title": "50000 everywhere"} self.obj = fabioimage(dat, hed) def testgetmax(self): """check max""" self.assertEqual(self.obj.getmax(), 50000) def testgetmin(self): """check min""" self.assertEqual(self.obj.getmin(), 50000) def testgetmean(self): """check mean""" self.assertEqual(self.obj.getmean(), 50000) def getstddev(self): """check stddev""" self.assertEqual(self.obj.getstddev(), 0) class TestSlices(unittest.TestCase): """check slicing""" def setUp(self): """make test data""" dat2 = numpy.zeros((1024, 1024), numpy.uint16) hed = {"Title": "zeros and 100"} self.cord = [256, 256, 790, 768] self.obj = fabioimage(dat2, hed) self.slic = slic = self.obj.make_slice(self.cord) # Note - d2 is modified *after* fabioimage is made dat2[slic] = dat2[slic] + 100 assert self.obj.maxval is None assert self.obj.minval is None self.npix = (slic[0].stop - slic[0].start) * \ (slic[1].stop - slic[1].start) def testgetmax(self): """check max""" self.assertEqual(self.obj.getmax(), 100) def testgetmin(self): """check min""" self.assertEqual(self.obj.getmin(), 0) def testintegratearea(self): """ check integrations""" self.obj.resetvals() area1 = self.obj.integrate_area(self.cord) 
self.obj.resetvals() area2 = self.obj.integrate_area(self.slic) self.assertEqual(area1, area2) self.assertEqual(area1, self.npix * 100) def testRebin(self): """Test the rebin method""" big = numpy.arange(64).reshape((8, 8)) res = numpy.array([[13, 17], [45, 49]]) fabimg = fabioimage(data=big, header={}) fabimg.rebin(4, 4) self.assertEqual(abs(res - fabimg.data).max(), 0, "data are the same after rebin") class TestOpen(unittest.TestCase): """check opening compressed files""" testfile = os.path.join(UtilsTest.tempdir, "testfile") def setUp(self): """ create test files""" if not os.path.isfile(self.testfile): with open(self.testfile, "wb") as f: f.write(b"{ hello }") if not os.path.isfile(self.testfile + ".gz"): with fabioutils.GzipFile(self.testfile + ".gz", "wb") as wf: wf.write(b"{ hello }") if not os.path.isfile(self.testfile + ".bz2"): with fabioutils.BZ2File(self.testfile + ".bz2", "wb") as wf: wf.write(b"{ hello }") self.obj = fabioimage() def testFlat(self): """ no compression""" res = self.obj._open(self.testfile).read() self.assertEqual(res, b"{ hello }") def testgz(self): """ gzipped """ res = self.obj._open(self.testfile + ".gz").read() self.assertEqual(res, b"{ hello }") def testbz2(self): """ bzipped""" res = self.obj._open(self.testfile + ".bz2").read() self.assertEqual(res, b"{ hello }") NAMES = {numpy.uint8: "numpy.uint8", numpy.int8: "numpy.int8", numpy.uint16: "numpy.uint16", numpy.int16: "numpy.int16", numpy.uint32: "numpy.uint32", numpy.int32: "numpy.int32", numpy.float32: "numpy.float32", numpy.float64: "numpy.float64"} class TestPilImage(unittest.TestCase): """ check PIL creation""" def setUp(self): if pilutils.Image is None: self.skipTest("PIL is not available") """ list of working numeric types""" self.okformats = [numpy.uint8, numpy.int8, numpy.uint16, numpy.int16, numpy.uint32, numpy.int32, numpy.float32] def mkdata(self, shape, typ): """ generate [01] testdata """ return (numpy.random.random(shape)).astype(typ) def testpil(self): for typ in 
self.okformats: name = NAMES[typ] for shape in [(10, 20), (431, 1325)]: testdata = self.mkdata(shape, typ) img = fabioimage(testdata, {"title": "Random data"}) pim = img.toPIL16() for i in [0, 5, 6, shape[1] - 1]: for j in [0, 5, 7, shape[0] - 1]: errstr = name + " %d %d %f %f t=%s" % ( i, j, testdata[j, i], pim.getpixel((i, j)), typ) er1 = img.data[j, i] - pim.getpixel((i, j)) er2 = img.data[j, i] + pim.getpixel((i, j)) # difference as % error in case of rounding if er2 != 0.: err = er1 / er2 else: err = er1 self.assertAlmostEqual(err, 0, 6, errstr) class TestPilImage2(TestPilImage): """ check with different numbers""" def mkdata(self, shape, typ): """ positive and big""" return (numpy.random.random(shape) * sys.maxsize / 10).astype(typ) class TestPilImage3(TestPilImage): """ check with different numbers""" def mkdata(self, shape, typ): """ positive, negative and big""" return ((numpy.random.random(shape) - 0.5) * sys.maxsize / 10).astype(typ) def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(Test50000)) testsuite.addTest(loadTests(TestSlices)) testsuite.addTest(loadTests(TestOpen)) testsuite.addTest(loadTests(TestPilImage)) testsuite.addTest(loadTests(TestPilImage2)) testsuite.addTest(loadTests(TestPilImage3)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/testOXDimage.py0000644001611600070440000001543613227357030020671 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, 
including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE # """ # Unit tests for OXD image (Oxford diffraction now Rigaku) """ from __future__ import print_function, with_statement, division, absolute_import __author__ = "Jerome Kieffer" __license__ = "MIT" __date__ = "2016-11-23" __contact__ = "jerome.kieffer@esrf.fr" import unittest import sys import os if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from fabio.OXDimage import OXDimage # filename dim1 dim2 min max mean stddev values are from OD Sapphire 3.0 TESTIMAGES = [ ("b191_1_9_1.img", 512, 512, -500, 11975, 25.70, 90.2526, "Sapphire2"), ("b191_1_9_1_uncompressed.img", 512, 512, -500, 11975, 25.70, 90.2526, "Sapphire2"), ("100nmfilmonglass_1_1.img", 1024, 1024, -172, 460, 44.20, 63.0245, "Sapphire3"), ("pilatus300k.rod_img", 487, 619, -2, 173075, 27.315, 538.938, "Pilatus")] class TestOxd(unittest.TestCase): def setUp(self): self.fn = {} for vals in TESTIMAGES: name = vals[0] self.fn[name] = UtilsTest.getimage(name + ".bz2")[:-4] for i in self.fn: assert 
os.path.exists(self.fn[i]) def tearDown(self): unittest.TestCase.tearDown(self) self.fn = {} def test_read(self): "Test reading of compressed OXD images" for vals in TESTIMAGES: name = vals[0] dim1, dim2 = vals[1:3] mini, maxi, mean, stddev = vals[3:7] detector_type = vals[7] obj = OXDimage() obj.read(self.fn[name]) self.assertEqual(dim1, obj.dim1, "dim1") self.assertEqual(dim2, obj.dim2, "dim2") self.assertAlmostEqual(mini, obj.getmin(), 2, "getmin on " + name) self.assertAlmostEqual(maxi, obj.getmax(), 2, "getmax on " + name) self.assertAlmostEqual(mean, obj.getmean(), 2, "getmean on " + name) self.assertAlmostEqual(stddev, obj.getstddev(), 2, "getstddev on " + name) self.assertIn(detector_type, obj.header["Detector type"], "detector type on " + name) def test_write(self): "Test writing with self consistency at the fabio level" for vals in TESTIMAGES: name = vals[0] obj = OXDimage() obj.read(self.fn[name]) if obj.header.get("Compression") not in ["NO ", "TY1"]: logger.info("Skip write test for now") continue obj.write(os.path.join(UtilsTest.tempdir, name)) other = OXDimage() other.read(os.path.join(UtilsTest.tempdir, name)) self.assertEqual(abs(obj.data - other.data).max(), 0, "data are not the same for %s" % name) for key in obj.header: if key == "filename": continue self.assertTrue(key in other.header, "Key %s is in header" % key) self.assertEqual(obj.header[key], other.header[key], "metadata '%s' are not the same for %s" % (key, name)) os.unlink(os.path.join(UtilsTest.tempdir, name)) class TestOxdSame(unittest.TestCase): def setUp(self): self.fn = {} for i in ["b191_1_9_1.img", "b191_1_9_1_uncompressed.img"]: self.fn[i] = UtilsTest.getimage(i + ".bz2")[:-4] for i in self.fn: assert os.path.exists(self.fn[i]) def tearDown(self): unittest.TestCase.tearDown(self) self.fn = {} def test_same(self): """test if images are actually the same""" o1 = fabio.open(self.fn["b191_1_9_1.img"]) o2 = fabio.open(self.fn["b191_1_9_1_uncompressed.img"]) for attr in ["getmin", 
"getmax", "getmean", "getstddev"]: a1 = getattr(o1, attr)() a2 = getattr(o2, attr)() self.assertEqual(a1, a2, "testing %s: %s | %s" % (attr, a1, a2)) class TestOxdBig(unittest.TestCase): """class to test bugs if OI is large (lot of exceptions 16 bits)""" def setUp(self): self.fn = {} for i in ["d80_60s.img", "d80_60s.edf"]: self.fn[i] = UtilsTest.getimage(i + ".bz2")[:-4] for i in self.fn: self.assertTrue(os.path.exists(self.fn[i]), self.fn[i]) def tearDown(self): unittest.TestCase.tearDown(self) self.fn = {} def test_same(self): df = [fabio.open(i).data for i in self.fn.values()] self.assertEqual(abs(df[0] - df[1]).max(), 0, "Data are the same") class TestConvert(unittest.TestCase): def setUp(self): self.fn = {} for i in ["face.msk"]: self.fn[i] = UtilsTest.getimage(i + ".bz2")[:-4] for i in self.fn: self.assertTrue(os.path.exists(self.fn[i]), self.fn[i]) def tearDown(self): unittest.TestCase.tearDown(self) self.fn = {} def test_convert(self): fn = self.fn["face.msk"] dst = os.path.join(UtilsTest.tempdir, "face.oxd") fabio.open(fn).convert("oxd").save(dst) self.assertTrue(os.path.exists(dst), "destination file exists") os.unlink(dst) def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestOxd)) testsuite.addTest(loadTests(TestOxdSame)) testsuite.addTest(loadTests(TestOxdBig)) testsuite.addTest(loadTests(TestConvert)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/testadscimage.py0000644001611600070440000001004713227357030021142 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the 
GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """ # Unit tests # builds on stuff from ImageD11.test.testpeaksearch Updated by Jerome Kieffer (jerome.kieffer@esrf.eu), 2011 28/11/2014 """ from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from fabio.adscimage import adscimage from fabio.edfimage import edfimage # statistics come from fit2d I think # filename dim1 dim2 min max mean stddev TESTIMAGES = """mb_LP_1_001.img 3072 3072 0.0000 65535. 120.33 147.38 mb_LP_1_001.img.gz 3072 3072 0.0000 65535. 120.33 147.38 mb_LP_1_001.img.bz2 3072 3072 0.0000 65535. 120.33 147.38""" class TestMatch(unittest.TestCase): """ check the ??fit2d?? 
conversion to edf gives same numbers """ def setUp(self): """ Download images """ self.fn_adsc = UtilsTest.getimage("mb_LP_1_001.img.bz2")[:-4] self.fn_edf = UtilsTest.getimage("mb_LP_1_001.edf.bz2")[:-4] def testsame(self): """test ADSC image match to EDF""" im1 = edfimage() im1.read(self.fn_edf) im2 = adscimage() im2.read(self.fn_adsc) diff = (im1.data.astype("float32") - im2.data.astype("float32")) logger.debug("type: %s %s shape %s %s " % (im1.data.dtype, im2.data.dtype, im1.data.shape, im2.data.shape)) logger.debug("im1 min %s %s max %s %s " % (im1.data.min(), im2.data.min(), im1.data.max(), im2.data.max())) logger.debug("delta min %s max %s mean %s" % (diff.min(), diff.max(), diff.mean())) self.assertEqual(abs(diff).max(), 0.0, "asdc data == edf data") class TestFlatMccdsAdsc(unittest.TestCase): """ """ def setUp(self): """ Download images """ self.im_dir = os.path.dirname(UtilsTest.getimage("mb_LP_1_001.img.bz2")) def test_read(self): """ check we can read flat ADSC images""" for line in TESTIMAGES.split("\n"): vals = line.split() name = vals[0] dim1, dim2 = [int(x) for x in vals[1:3]] mini, maxi, mean, stddev = [float(x) for x in vals[3:]] obj = adscimage() obj.read(os.path.join(self.im_dir, name)) self.assertAlmostEqual(mini, obj.getmin(), 2, "getmin") self.assertAlmostEqual(maxi, obj.getmax(), 2, "getmax") got_mean = obj.getmean() self.assertAlmostEqual(mean, got_mean, 2, "getmean exp %s != got %s" % (mean, got_mean)) self.assertAlmostEqual(stddev, obj.getstddev(), 2, "getstddev") self.assertEqual(dim1, obj.dim1, "dim1") self.assertEqual(dim2, obj.dim2, "dim2") def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestMatch)) testsuite.addTest(loadTests(TestFlatMccdsAdsc)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/testpilatusimage.py0000644001611600070440000000557013227357030021716 0ustar 
kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """Pilatus Tiff Unit tests""" from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] class TestPilatus(unittest.TestCase): # filename dim1 dim2 min max mean stddev TESTIMAGES = """ lysb_5mg-1.90s_SAXS.bz2 487 619 0 1300 29.4260 17.7367 lysb_5mg-1.90s_SAXS.gz 487 619 0 1300 29.4260 17.7367 lysb_5mg-1.90s_SAXS 487 619 0 1300 29.4260 17.7367 """ def test_read(self): """ Test the reading of Mar345 images """ for line in self.TESTIMAGES.split('\n'): vals = line.strip().split() if not vals: continue name = vals[0] logger.debug("Processing: %s" % name) dim1, dim2 = [int(x) for x in vals[1:3]] mini, maxi, mean, stddev = [float(x) for x in vals[3:]] obj = fabio.pilatusimage.PilatusImage() obj.read(UtilsTest.getimage(name)) self.assertAlmostEqual(mini, obj.getmin(), 2, "getmin [%s,%s]" % (mini, obj.getmin())) 
self.assertAlmostEqual(maxi, obj.getmax(), 2, "getmax [%s,%s]" % (maxi, obj.getmax())) self.assertAlmostEqual(mean, obj.getmean(), 2, "getmean [%s,%s]" % (mean, obj.getmean())) self.assertAlmostEqual(stddev, obj.getstddev(), 2, "getstddev [%s,%s]" % (stddev, obj.getstddev())) self.assertEqual(dim1, obj.dim1, "dim1") self.assertEqual(dim2, obj.dim2, "dim2") def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestPilatus)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/setup.py0000644001611600070440000000320213227357030017460 0ustar kiefferscisoft00000000000000# coding: utf-8 # /*########################################################################## # Copyright (C) 2016 European Synchrotron Radiation Facility # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
# #
# ###########################################################################*/

__authors__ = ["V. Valls"]
__license__ = "MIT"
__date__ = "31/07/2017"

from numpy.distutils.misc_util import Configuration


def configuration(parent_package='', top_path=None):
    """Return the numpy.distutils configuration of the ``test`` sub-package.

    :param parent_package: dotted name of the parent package
    :param top_path: path to the top of the source tree
    :return: a :class:`Configuration` instance describing this package
    """
    return Configuration('test', parent_package, top_path)


if __name__ == "__main__":
    from numpy.distutils.core import setup
    setup(configuration=configuration)
# """ # Unit tests builds on stuff from ImageD11.test.testpeaksearch Jerome Kieffer 04/12/2014 """ from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from fabio.openimage import openimage from fabio.edfimage import edfimage from fabio.marccdimage import marccdimage from fabio.fit2dmaskimage import fit2dmaskimage from fabio.OXDimage import OXDimage from fabio.brukerimage import brukerimage from fabio.adscimage import adscimage class TestOpenEdf(unittest.TestCase): """openimage opening edf""" def checkFile(self, filename): """ check we can read EDF image with openimage""" obj = openimage(filename) obj2 = edfimage() obj2.read(filename) self.assertEqual(obj.data[10, 10], obj2.data[10, 10]) self.assertEqual(type(obj), type(obj2)) self.assertEqual(abs(obj.data.astype(int) - obj2.data.astype(int)).sum(), 0) def testEdf(self): fname = "F2K_Seb_Lyso0675.edf.bz2" filename = UtilsTest.getimage(fname)[:-4] self.checkFile(filename) def testEdfGz(self): fname = "F2K_Seb_Lyso0675.edf.gz" filename = UtilsTest.getimage(fname) self.checkFile(filename) def testEdfBz2(self): fname = "F2K_Seb_Lyso0675.edf.bz2" filename = UtilsTest.getimage(fname) self.checkFile(filename) class TestOpenMccd(unittest.TestCase): """openimage opening mccd""" def checkFile(self, filename): """ check we can read it""" obj = openimage(filename) obj2 = marccdimage() obj2.read(filename) self.assertEqual(obj.data[10, 10], obj2.data[10, 10]) self.assertEqual(type(obj), type(obj2)) self.assertEqual(abs(obj.data.astype(int) - obj2.data.astype(int)).sum(), 0) def testMccd(self): fname = "somedata_0001.mccd.bz2" filename = UtilsTest.getimage(fname)[:-4] self.checkFile(filename) def testMccdGz(self): fname = "somedata_0001.mccd.gz" 
filename = UtilsTest.getimage(fname) self.checkFile(filename) def testMccdBz2(self): fname = "somedata_0001.mccd.bz2" filename = UtilsTest.getimage(fname) self.checkFile(filename) class TestOpenMask(unittest.TestCase): """openimage opening fit2d msk""" def checkFile(self, filename): """ check we can read Fit2D mask with openimage""" obj = openimage(filename) obj2 = fit2dmaskimage() obj2.read(filename) self.assertEqual(obj.data[10, 10], obj2.data[10, 10]) self.assertEqual(type(obj), type(obj2)) self.assertEqual(abs(obj.data.astype(int) - obj2.data.astype(int)).sum(), 0) self.assertEqual(abs(obj.data.astype(int) - obj2.data.astype(int)).sum(), 0) def testMask(self): """openimage opening fit2d msk""" fname = "face.msk.bz2" filename = UtilsTest.getimage(fname)[:-4] self.checkFile(filename) def testMaskGz(self): """openimage opening fit2d msk gzip""" fname = "face.msk.gz" filename = UtilsTest.getimage(fname) self.checkFile(filename) def testMaskBz2(self): """openimage opening fit2d msk bzip""" fname = "face.msk.bz2" filename = UtilsTest.getimage(fname) self.checkFile(filename) class TestOpenBruker(unittest.TestCase): """openimage opening bruker""" def checkFile(self, filename): """ check we can read it""" obj = openimage(filename) obj2 = brukerimage() obj2.read(filename) self.assertEqual(obj.data[10, 10], obj2.data[10, 10]) self.assertEqual(type(obj), type(obj2)) self.assertEqual(abs(obj.data.astype(int) - obj2.data.astype(int)).sum(), 0) def testBruker(self): """openimage opening bruker""" fname = "Cr8F8140k103.0026.bz2" filename = UtilsTest.getimage(fname)[:-4] self.checkFile(filename) def testBrukerGz(self): """openimage opening bruker gzip""" fname = "Cr8F8140k103.0026.gz" filename = UtilsTest.getimage(fname) self.checkFile(filename) def testBrukerBz2(self): """openimage opening bruker bzip""" fname = "Cr8F8140k103.0026.bz2" filename = UtilsTest.getimage(fname) self.checkFile(filename) class TestOpenAdsc(unittest.TestCase): """openimage opening adsc""" def 
checkFile(self, filename): """ check we can read it""" obj = openimage(filename) obj2 = adscimage() obj2.read(filename) self.assertEqual(obj.data[10, 10], obj2.data[10, 10]) self.assertEqual(type(obj), type(obj2)) self.assertEqual(abs(obj.data.astype(int) - obj2.data.astype(int)).sum(), 0) def testAdsc(self): """openimage opening adsc""" fname = "mb_LP_1_001.img.bz2" filename = UtilsTest.getimage(fname)[:-4] self.checkFile(filename) def testAdscGz(self): """openimage opening adsc gzip""" fname = "mb_LP_1_001.img.gz" filename = UtilsTest.getimage(fname) self.checkFile(filename) def testAdscBz2(self): """openimage opening adsc bzip""" fname = "mb_LP_1_001.img.bz2" filename = UtilsTest.getimage(fname) self.checkFile(filename) class TestOpenOxd(unittest.TestCase): """openimage opening adsc""" def checkFile(self, filename): """ check we can read OXD images with openimage""" obj = openimage(filename) obj2 = OXDimage() obj2.read(filename) self.assertEqual(obj.data[10, 10], obj2.data[10, 10]) self.assertEqual(type(obj), type(obj2)) self.assertEqual(abs(obj.data.astype(int) - obj2.data.astype(int)).sum(), 0) def testOxd(self): """openimage opening adsc""" fname = "b191_1_9_1.img.bz2" filename = UtilsTest.getimage(fname)[:-4] self.checkFile(filename) def testOxdUnc(self): """openimage opening adsc""" fname = "b191_1_9_1_uncompressed.img.bz2" filename = UtilsTest.getimage(fname)[:-4] self.checkFile(filename) def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestOpenAdsc)) testsuite.addTest(loadTests(TestOpenBruker)) testsuite.addTest(loadTests(TestOpenEdf)) testsuite.addTest(loadTests(TestOpenMask)) testsuite.addTest(loadTests(TestOpenMccd)) testsuite.addTest(loadTests(TestOpenOxd)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/testkcdimage.py0000755001611600070440000000653513227357030021003 0ustar 
kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """ Test for Nonius Kappa CCD cameras. """ from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from ..kcdimage import kcdimage from ..openimage import openimage class TestKcd(unittest.TestCase): """basic test""" kcdfilename = 'i01f0001.kcd' edffilename = 'i01f0001.edf' results = """i01f0001.kcd 625 576 96 66814.0 195.3862972 243.58150990245315""" def setUp(self): """Download files""" self.fn = {} for i in ["i01f0001.kcd", "i01f0001.edf"]: self.fn[i] = UtilsTest.getimage(i + ".bz2")[:-4] for i in self.fn: assert os.path.exists(self.fn[i]) def test_read(self): """ check we can read kcd images""" vals = self.results.split() dim1, dim2 = [int(x) for x in vals[1:3]] mini, maxi, mean, stddev = [float(x) for x in vals[3:]] for ext in ["", ".gz", ".bz2"]: try: obj = openimage(self.fn[self.kcdfilename] + ext) except Exception 
as err: logger.error("unable to read: %s", self.fn[self.kcdfilename] + ext) raise err self.assertAlmostEqual(mini, obj.getmin(), 4, "getmin" + ext) self.assertAlmostEqual(maxi, obj.getmax(), 4, "getmax" + ext) self.assertAlmostEqual(mean, obj.getmean(), 4, "getmean" + ext) self.assertAlmostEqual(stddev, obj.getstddev(), 4, "getstddev" + ext) self.assertEqual(dim1, obj.dim1, "dim1" + ext) self.assertEqual(dim2, obj.dim2, "dim2" + ext) def test_same(self): """ see if we can read kcd images and if they are the same as the EDF """ kcd = kcdimage() kcd.read(self.fn[self.kcdfilename]) edf = fabio.open(self.fn[self.edffilename]) diff = (kcd.data.astype("int32") - edf.data.astype("int32")) self.assertAlmostEqual(abs(diff).sum(dtype=int), 0, 4) def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestKcd)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/test_formats.py0000755001611600070440000000414213227357030021041 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# """ # Unit tests # builds on stuff from ImageD11.test.testpeaksearch 28/11/2014 """ from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from .. import fabioformats class TestRegistration(unittest.TestCase): def test_annotation(self): @fabio.register class MyFormat1(fabio.fabioimage.FabioImage): pass self.assertIsNotNone(fabioformats.get_class_by_name("myformat1")) def test_function(self): class MyFormat2(fabio.fabioimage.FabioImage): pass fabio.register(MyFormat2) self.assertIsNotNone(fabioformats.get_class_by_name("myformat2")) def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestRegistration)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/test_filename_steps.py0000644001611600070440000000542613227357030022367 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """ Test cases for the Next/Previous ... 28/11/2014 """ from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] class TestNext(unittest.TestCase): def test_next1(self): for name, next_ in [["data0001.edf", "data0002.edf"], ["bob1.edf", "bob2.edf"], ["1.edf", "2.edf"], ["1.mar2300", "2.mar2300"], ]: self.assertEqual(next_, fabio.next_filename(name)) class TestPrev(unittest.TestCase): def test_prev1(self): for name, prev in [["data0001.edf", "data0000.edf"], ["bob1.edf", "bob0.edf"], ["1.edf", "0.edf" ], ["1.mar2300", "0.mar2300"], ]: self.assertEqual(prev, fabio.previous_filename(name)) class TestJump(unittest.TestCase): def test_jump1(self): for name, res, num in [["data0001.edf", "data99993.edf", 99993], ["bob1.edf", "bob0.edf", 0], ["1.edf", "123456.edf", 123456], ["mydata001.mar2300.gz", "mydata003.mar2300.gz", 3], ]: self.assertEqual(res, fabio.jump_filename(name, num)) def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestNext)) testsuite.addTest(loadTests(TestPrev)) testsuite.addTest(loadTests(TestJump)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/test_file_series.py0000644001611600070440000000725213227357030021661 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program 
is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """ test cases for fileseries 28/11/2014 """ from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os import bz2 if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from fabio.file_series import numbered_file_series, file_series class TestRandomSeries(unittest.TestCase): """arbitrary series""" def setUp(self): """sets up""" self.fso = file_series(["first", "second", "last"]) def testfirst(self): """check first""" self.assertEqual("first", self.fso.first()) def testlast(self): """check first""" self.assertEqual("last", self.fso.last()) def testjump(self): """check jump""" self.assertEqual("second", self.fso.jump(1)) class TestEdfNumbered(unittest.TestCase): """ Typical sequence of edf files """ def setUp(self): """ note extension has the . 
in it""" self.fso = numbered_file_series("mydata", 0, 10005, ".edf") def testfirst(self): """ first in series""" self.assertEqual(self.fso.first(), "mydata0000.edf") def testlast(self): """ last in series""" self.assertEqual(self.fso.last(), "mydata10005.edf") def testnext(self): """ check all in order """ mylist = ["mydata%04d.edf" % (i) for i in range(0, 10005)] i = 1 while i < len(mylist): self.assertEqual(mylist[i], self.fso.next()) i += 1 def testprevious(self): """ check all in order """ mylist = ["mydata%04d.edf" % (i) for i in range(0, 10005)] i = 10003 self.fso.jump(10004) while i > 0: self.assertEqual(mylist[i], self.fso.previous()) i -= 1 def testprevjump(self): """check current""" self.fso.jump(9999) self.assertEqual("mydata9999.edf", self.fso.current()) self.assertEqual("mydata9998.edf", self.fso.previous()) def testnextjump(self): """check current""" self.fso.jump(9999) self.assertEqual("mydata9999.edf", self.fso.current()) self.assertEqual("mydata10000.edf", self.fso.next()) def testlen(self): """check len""" self.assertEqual(self.fso.len(), 10006) # +1 for 0000 def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestRandomSeries)) testsuite.addTest(loadTests(TestEdfNumbered)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/testXSDimage.py0000755001611600070440000000743113227357030020674 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) 
any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """ # Unit tests # builds on stuff from ImageD11.test.testpeaksearch """ from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] import fabio.xsdimage import numpy # filename dim1 dim2 min max mean stddev values are from OD Sapphire 3.0 TESTIMAGES = """XSDataImage.xml 512 512 86 61204 511.63 667.15 XSDataImageInv.xml 512 512 -0.2814 0.22705039 2.81e-08 0.010""" class TestXSD(unittest.TestCase): def setUp(self): if fabio.xsdimage.etree is None: self.skipTest("etree is not available") self.fn = {} for i in ["XSDataImage.edf", "XSDataImage.xml", "XSDataImageInv.xml"]: self.fn[i] = UtilsTest.getimage(i + ".bz2")[:-4] def test_read(self): "Test reading of XSD images" for line in TESTIMAGES.split("\n"): vals = line.split() name = vals[0] dim1, dim2 = [int(x) for x in vals[1:3]] mini, maxi, mean, stddev = [float(x) for x in vals[3:]] obj = fabio.xsdimage.xsdimage() obj.read(self.fn[name]) self.assertAlmostEqual(mini, obj.getmin(), 2, "getmin") self.assertAlmostEqual(maxi, obj.getmax(), 2, "getmax") self.assertAlmostEqual(mean, obj.getmean(), 2, "getmean") logger.info("%s %s %s" % (name, stddev, obj.getstddev())) self.assertAlmostEqual(stddev, obj.getstddev(), 2, "getstddev") self.assertEqual(dim1, obj.dim1, "dim1") self.assertEqual(dim2, obj.dim2, "dim2") def test_same(self): """ test if an image is the same as the EDF 
equivalent""" xsd = fabio.open(self.fn["XSDataImage.xml"]) edf = fabio.open(self.fn["XSDataImage.edf"]) self.assertAlmostEqual(0, abs(xsd.data - edf.data).max(), 1, "images are the same") def test_invert(self): """ Tests that 2 matrixes are invert """ m1 = fabio.open(self.fn["XSDataImage.xml"]) m2 = fabio.open(self.fn["XSDataImageInv.xml"]) delta = abs((numpy.matrix(m1.data) * numpy.matrix(m2.data)) - numpy.identity(m1.data.shape[0])).max() if delta >= 1e-3: logger.error("Matrices are not invert of each other !!! prod = %s", numpy.matrix(m1.data) * numpy.matrix(m2.data)) self.assertTrue(delta < 1e-3, "matrices are invert of each other") def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestXSD)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite()) fabio-0.6.0/fabio/test/testmar345image.py0000644001611600070440000002105513227357030021244 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Project: Fable Input Output # https://github.com/silx-kit/fabio # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# """ # Unit tests # builds on stuff from ImageD11.test.testpeaksearch 28/11/2014 """ from __future__ import print_function, with_statement, division, absolute_import import unittest import sys import os import numpy import logging if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest logger = UtilsTest.get_logger(__file__) fabio = sys.modules["fabio"] from fabio.mar345image import mar345image # filename dim1 dim2 min max mean stddev TESTIMAGES = """example.mar2300 2300 2300 0 999999 180.15 4122.67 example.mar2300.bz2 2300 2300 0 999999 180.15 4122.67 example.mar2300.gz 2300 2300 0 999999 180.15 4122.67 Fe3O4_023_101.mar2560 2560 3072 0 258253 83.61749 198.29895739775 Fe3O4_023_101.mar2560.bz2 2560 3072 0 258253 83.61749 198.29895739775 Fe3O4_023_101.mar2560.gz 2560 3072 0 258253 83.61749 198.29895739775""" # Fe3O4_023_101.mar2560 is a pathological file from Mar555 class TestMar345(unittest.TestCase): def setUp(self): """ download images """ self.mar345 = UtilsTest.getimage("example.mar2300.bz2")[:-4] self.mar555 = UtilsTest.getimage("Fe3O4_023_101.mar2560.bz2")[:-4] def tearDown(self): unittest.TestCase.tearDown(self) self.mar345 = self.mar555 = None def test_read(self): """ Test the reading of Mar345 images """ for line in TESTIMAGES.split('\n'): vals = line.strip().split() name = vals[0] dim1, dim2 = [int(x) for x in vals[1:3]] mini, maxi, mean, stddev = [float(x) for x in vals[3:]] obj = mar345image() obj.read(UtilsTest.getimage(name)) self.assertAlmostEqual(mini, obj.getmin(), 2, "getmin [%s,%s]" % (mini, obj.getmin())) self.assertAlmostEqual(maxi, obj.getmax(), 2, "getmax [%s,%s]" % (maxi, obj.getmax())) self.assertAlmostEqual(mean, obj.getmean(), 2, "getmean [%s,%s]" % (mean, obj.getmean())) self.assertAlmostEqual(stddev, obj.getstddev(), 2, "getstddev [%s,%s]" % (stddev, obj.getstddev())) self.assertEqual(dim1, obj.dim1, "dim1") self.assertEqual(dim2, obj.dim2, 
"dim2") def test_write(self): "Test writing with self consistency at the fabio level" for line in TESTIMAGES.split("\n"): logger.debug("Processing file: %s" % line) vals = line.split() name = vals[0] obj = mar345image() obj.read(os.path.join(os.path.dirname(self.mar345), name)) obj.write(os.path.join(UtilsTest.tempdir, name)) other = mar345image() other.read(os.path.join(UtilsTest.tempdir, name)) if abs(obj.data - other.data).max(): logger.error("data for %s are not the same %s", line, numpy.where(obj.data - other.data)) self.assertEqual(abs(obj.data - other.data).max(), 0, "data are the same") for key in obj.header: if key == "filename": continue self.assertTrue(key in other.header, "Key %s is in header" % key) self.assertEqual(obj.header[key], other.header[key], "value are the same for key %s: [%s|%s]" % (key, obj.header[key], other.header[key])) os.unlink(os.path.join(UtilsTest.tempdir, name)) def test_byteswap_write(self): "Test writing with self consistency at the fabio level" for line in TESTIMAGES.split("\n"): logger.debug("Processing file: %s" % line) vals = line.split() name = vals[0] obj = mar345image() obj.read(os.path.join(os.path.dirname(self.mar345), name)) obj.swap_needed = not (obj.swap_needed) obj.write(os.path.join(UtilsTest.tempdir, name)) other = mar345image() other.read(os.path.join(UtilsTest.tempdir, name)) self.assertEqual(abs(obj.data - other.data).max(), 0, "data are the same") for key in obj.header: if key == "filename": continue self.assertTrue(key in other.header, "Key %s is in header" % key) self.assertEqual(obj.header[key], other.header[key], "value are the same for key %s: [%s|%s]" % (key, obj.header[key], other.header[key])) os.unlink(os.path.join(UtilsTest.tempdir, name)) @unittest.skip("very slow test") def test_memoryleak(self): """ This test takes a lot of time, so only in debug mode. 
""" N = 1000 if logger.getEffectiveLevel() <= logging.INFO: logger.debug("Testing for memory leak") for i in range(N): _img = fabio.open(self.mar345) print("reading #%s/%s" % (i, N)) def test_aux(self): """test auxillary functions """ shape = 120, 130 size = shape[0] * shape[1] import fabio.ext.mar345_IO img = numpy.random.randint(0, 32000, size).astype("int16") b = fabio.ext.mar345_IO.precomp(img, shape[-1]) c = fabio.ext.mar345_IO.postdec(b, shape[-1]) self.assertEqual(abs(c - img).max(), 0, "pre-compression and post-decompression works") a = fabio.ext.mar345_IO.calc_nb_bits(numpy.arange(8).astype("int32"), 0, 8) self.assertEqual(a, 32, "8*4") a = fabio.ext.mar345_IO.calc_nb_bits(numpy.arange(10).astype("int32"), 0, 10) self.assertEqual(a, 50, "10*5") a = fabio.ext.mar345_IO.calc_nb_bits(numpy.arange(50).astype("int32"), 0, 50) self.assertEqual(a, 350, 50 * 7) img.shape = shape cmp_ccp4 = fabio.ext.mar345_IO.compress_pck(img, use_CCP4=True) cmp_fab = fabio.ext.mar345_IO.compress_pck(img, use_CCP4=False) delta = abs(len(cmp_fab) - len(cmp_ccp4)) if len(cmp_fab) > len(cmp_ccp4): logger.error("len(fabio): %s len(ccp4):%s", len(cmp_fab), len(cmp_ccp4)) self.assertLessEqual(delta, 10, "Compression by FabIO is similar to CCP4") img_c_c = fabio.ext.mar345_IO.uncompress_pck(cmp_ccp4, overflowPix=False, use_CCP4=True) delta = img_c_c - img ok = abs(delta).ravel() if ok.max() > 0: logger.error("img_c_c: %s %s" % numpy.where(delta)) self.assertEqual(ok.max(), 0, "Compression CCP4 decompression CCP4") img_c_f = fabio.ext.mar345_IO.uncompress_pck(cmp_ccp4, overflowPix=False, use_CCP4=False) delta = img_c_f - img ok = abs(delta).ravel() if ok.max() > 0: logger.error("img_c_f: %s %s" % numpy.where(delta)) self.assertEqual(ok.max(), 0, "Compression CCP4 decompression Cython") img_f_c = fabio.ext.mar345_IO.uncompress_pck(cmp_fab, overflowPix=False, use_CCP4=True) delta = img_f_c - img ok = abs(delta).ravel() if ok.max() > 0: logger.error("img_f_c: %s %s" % numpy.where(delta)) 
self.assertEqual(ok.max(), 0, "Compression Cython decompression CCP4") img_f_f = fabio.ext.mar345_IO.uncompress_pck(cmp_fab, overflowPix=False, use_CCP4=False) delta = img_f_f - img ok = abs(delta).ravel() if ok.max() > 0: logger.error("img_f_f: %s %s" % numpy.where(delta)) self.assertEqual(ok.max(), 0, "Compression Cython decompression Cython") def suite(): loadTests = unittest.defaultTestLoader.loadTestsFromTestCase testsuite = unittest.TestSuite() testsuite.addTest(loadTests(TestMar345)) return testsuite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite) fabio-0.6.0/fabio/test/profile_all.py0000755001611600070440000000602713227357030020623 0ustar kiefferscisoft00000000000000#!/usr/bin/python # coding: utf-8 # # Project: Azimuthal integration # https://github.com/pyFAI/pyFAI # # Copyright (C) 2015 European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """Test suite for all pyFAI modules with timing and memory profiling""" from __future__ import absolute_import, division, print_function __authors__ = ["Jérôme Kieffer"] __contact__ = "jerome.kieffer@esrf.eu" __license__ = "MIT" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" __date__ = "24/07/2017" import sys import os import unittest import time if __name__ == '__main__': import pkgutil __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test") from .utilstest import UtilsTest from . import test_all import resource import logging profiler = logging.getLogger("memProf") profiler.setLevel(logging.DEBUG) profiler.handlers.append(logging.FileHandler("profile.log")) class TestResult(unittest.TestResult): def startTest(self, test): self.__mem_start = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss self.__time_start = time.time() unittest.TestResult.startTest(self, test) def stopTest(self, test): unittest.TestResult.stopTest(self, test) profiler.info("Time: %.3fs \t RAM: %.3f Mb\t%s" % (time.time() - self.__time_start, (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss - self.__mem_start) / 1e3, test.id())) class ProfileTestRunner(unittest.TextTestRunner): def _makeResult(self): return TestResult(stream=sys.stderr, descriptions=True, verbosity=1) if __name__ == '__main__': suite = test_all.suite() runner = ProfileTestRunner() testresult = runner.run(suite) if testresult.wasSuccessful(): # UtilsTest.clean_up() print("all tests passed") else: sys.exit(1) fabio-0.6.0/fabio/TiffIO.py0000644001611600070440000014133413227357030016472 0ustar kiefferscisoft00000000000000# The PyMca X-Ray Fluorescence Toolkit # # Copyright (c) 2004-2015 European Synchrotron Radiation 
Facility # # This file is part of the PyMca X-ray Fluorescence Toolkit developed at # the ESRF by the Software group. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. __author__ = "V.A. Sole - ESRF Data Analysis" __contact__ = "sole@esrf.fr" __license__ = "MIT" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" __date__ = "25/07/2017" import sys import os import struct import numpy import logging ALLOW_MULTIPLE_STRIPS = False TAG_ID = {256: "NumberOfColumns", # S or L ImageWidth 257: "NumberOfRows", # S or L ImageHeight 258: "BitsPerSample", # S Number of bits per component 259: "Compression", # SHORT (1 - NoCompression, ... 
262: "PhotometricInterpretation", # SHORT (0 - WhiteIsZero, 1 -BlackIsZero, 2 - RGB, 3 - Palette color 270: "ImageDescription", # ASCII 273: "StripOffsets", # S or L, for each strip, the byte offset of the strip 277: "SamplesPerPixel", # SHORT (>=3) only for RGB images 278: "RowsPerStrip", # S or L, number of rows in each back may be not for the last 279: "StripByteCounts", # S or L, The number of bytes in the strip AFTER any compression 305: "Software", # ASCII 306: "Date", # ASCII 320: "Colormap", # Colormap of Palette-color Images 339: "SampleFormat", # SHORT Interpretation of data in each pixel } # TILES ARE TO BE SUPPORTED TOO ... TAG_NUMBER_OF_COLUMNS = 256 TAG_NUMBER_OF_ROWS = 257 TAG_BITS_PER_SAMPLE = 258 TAG_PHOTOMETRIC_INTERPRETATION = 262 TAG_COMPRESSION = 259 TAG_IMAGE_DESCRIPTION = 270 TAG_STRIP_OFFSETS = 273 TAG_SAMPLES_PER_PIXEL = 277 TAG_ROWS_PER_STRIP = 278 TAG_STRIP_BYTE_COUNTS = 279 TAG_SOFTWARE = 305 TAG_DATE = 306 TAG_COLORMAP = 320 TAG_SAMPLE_FORMAT = 339 FIELD_TYPE = {1: ('BYTE', "B"), 2: ('ASCII', "s"), # string ending with binary zero 3: ('SHORT', "H"), 4: ('LONG', "I"), 5: ('RATIONAL', "II"), 6: ('SBYTE', "b"), 7: ('UNDEFINED', "B"), 8: ('SSHORT', "h"), 9: ('SLONG', "i"), 10: ('SRATIONAL', "ii"), 11: ('FLOAT', "f"), 12: ('DOUBLE', "d")} FIELD_TYPE_OUT = {'B': 1, 's': 2, 'H': 3, 'I': 4, 'II': 5, 'b': 6, 'h': 8, 'i': 9, 'ii': 10, 'f': 11, 'd': 12} # sample formats (http://www.awaresystems.be/imaging/tiff/tiffflags/sampleformat.html) SAMPLE_FORMAT_UINT = 1 SAMPLE_FORMAT_INT = 2 SAMPLE_FORMAT_FLOAT = 3 # floating point SAMPLE_FORMAT_VOID = 4 # undefined data, usually assumed UINT SAMPLE_FORMAT_COMPLEXINT = 5 SAMPLE_FORMAT_COMPLEXIEEEFP = 6 logger = logging.getLogger(__name__) class TiffIO(object): def __init__(self, filename, mode=None, cache_length=20, mono_output=False): if mode is None: mode = 'rb' if 'b' not in mode: mode = mode + 'b' if 'a' in mode.lower(): raise IOError("Mode %s makes no sense on TIFF files. 
Consider 'rb+'" % mode) if ('w' in mode): if '+' not in mode: mode += '+' if hasattr(filename, "seek") and\ hasattr(filename, "read"): fd = filename self._access = None else: # the b is needed for windows and python 3 fd = open(filename, mode) self._access = mode self._initInternalVariables(fd) self._maxImageCacheLength = cache_length self._forceMonoOutput = mono_output def __enter__(self): return self def __exit__(self, *arg): # TODO: inspace type, value and traceback self.close() def _initInternalVariables(self, fd=None): if fd is None: fd = self.fd else: self.fd = fd # read the order fd.seek(0) order = fd.read(2).decode() if len(order): if order == "II": # intel, little endian fileOrder = "little" self._structChar = '<' elif order == "MM": # motorola, high endian fileOrder = "big" self._structChar = '>' else: raise IOError("File is not a Mar CCD file, nor a TIFF file") a = fd.read(2) fortyTwo = struct.unpack(self._structChar + "H", a)[0] if fortyTwo != 42: raise IOError("Invalid TIFF version %d" % fortyTwo) else: logger.debug("VALID TIFF VERSION") if sys.byteorder != fileOrder: swap = True else: swap = False else: if sys.byteorder == "little": self._structChar = '<' else: self._structChar = '>' swap = False self._swap = swap self._IFD = [] self._imageDataCacheIndex = [] self._imageDataCache = [] self._imageInfoCacheIndex = [] self._imageInfoCache = [] self.getImageFileDirectories(fd) def __makeSureFileIsOpen(self): if not self.fd.closed: return logger.debug("Reopening closed file") fileName = self.fd.name if self._access is None: # we do not own the file # open in read mode newFile = open(fileName, 'rb') else: newFile = open(fileName, self._access) self.fd = newFile def __makeSureFileIsClosed(self): if self._access is None: # we do not own the file logger.debug("Not closing not owned file") return if not self.fd.closed: self.fd.close() def close(self): return self.__makeSureFileIsClosed() def getNumberOfImages(self): # update for the case someone has done 
anything? self._updateIFD() return len(self._IFD) def _updateIFD(self): self.__makeSureFileIsOpen() self.getImageFileDirectories() self.__makeSureFileIsClosed() def getImageFileDirectories(self, fd=None): if fd is None: fd = self.fd else: self.fd = fd st = self._structChar fd.seek(4) self._IFD = [] nImages = 0 fmt = st + 'I' inStr = fd.read(struct.calcsize(fmt)) if not len(inStr): offsetToIFD = 0 else: offsetToIFD = struct.unpack(fmt, inStr)[0] logger.debug("Offset to first IFD = %d", offsetToIFD) while offsetToIFD != 0: self._IFD.append(offsetToIFD) nImages += 1 fd.seek(offsetToIFD) fmt = st + 'H' numberOfDirectoryEntries = struct.unpack(fmt, fd.read(struct.calcsize(fmt)))[0] logger.debug("Number of directory entries = %d", numberOfDirectoryEntries) fmt = st + 'I' fd.seek(offsetToIFD + 2 + 12 * numberOfDirectoryEntries) offsetToIFD = struct.unpack(fmt, fd.read(struct.calcsize(fmt)))[0] logger.debug("Next Offset to IFD = %d", offsetToIFD) # offsetToIFD = 0 logger.debug("Number of images found = %d", nImages) return nImages def _parseImageFileDirectory(self, nImage): offsetToIFD = self._IFD[nImage] st = self._structChar fd = self.fd fd.seek(offsetToIFD) fmt = st + 'H' numberOfDirectoryEntries = struct.unpack(fmt, fd.read(struct.calcsize(fmt)))[0] logger.debug("Number of directory entries = %d", numberOfDirectoryEntries) fmt = st + 'HHI4s' tagIDList = [] fieldTypeList = [] nValuesList = [] valueOffsetList = [] for _ in range(numberOfDirectoryEntries): tagID, fieldType, nValues, valueOffset = struct.unpack(fmt, fd.read(12)) tagIDList.append(tagID) fieldTypeList.append(fieldType) nValuesList.append(nValues) if nValues == 1: ftype, vfmt = FIELD_TYPE[fieldType] if ftype not in ['ASCII', 'RATIONAL', 'SRATIONAL']: vfmt = st + vfmt data = valueOffset[0: struct.calcsize(vfmt)] if struct.calcsize(vfmt) > len(data): # Add a 0 padding to have the expected size logger.warning("Data at tag id '%s' is smaller than expected", tagID) data = data + b"\x00" * (struct.calcsize(vfmt) - 
len(data)) actualValue = struct.unpack(vfmt, data)[0] valueOffsetList.append(actualValue) else: valueOffsetList.append(valueOffset) elif (nValues < 5) and (fieldType == 2): ftype, vfmt = FIELD_TYPE[fieldType] vfmt = st + "%d%s" % (nValues, vfmt) actualValue = struct.unpack(vfmt, valueOffset[0: struct.calcsize(vfmt)])[0] valueOffsetList.append(actualValue) else: valueOffsetList.append(valueOffset) if logger.getEffectiveLevel() == logging.DEBUG: if tagID in TAG_ID: logger.debug("tagID = %s", TAG_ID[tagID]) else: logger.debug("tagID = %d", tagID) logger.debug("fieldType = %s", FIELD_TYPE[fieldType][0]) logger.debug("nValues = %d", nValues) # if nValues == 1: # logger.debug("valueOffset = %s", valueOffset) return tagIDList, fieldTypeList, nValuesList, valueOffsetList def _readIFDEntry(self, tag, tagIDList, fieldTypeList, nValuesList, valueOffsetList): fd = self.fd st = self._structChar idx = tagIDList.index(tag) nValues = nValuesList[idx] output = [] _ftype, vfmt = FIELD_TYPE[fieldTypeList[idx]] vfmt = st + "%d%s" % (nValues, vfmt) requestedBytes = struct.calcsize(vfmt) if nValues == 1: output.append(valueOffsetList[idx]) elif requestedBytes < 5: output.append(valueOffsetList[idx]) else: fd.seek(struct.unpack(st + "I", valueOffsetList[idx])[0]) output = struct.unpack(vfmt, fd.read(requestedBytes)) return output def getData(self, nImage, **kw): if nImage >= len(self._IFD): # update prior to raise an index error error self._updateIFD() return self._readImage(nImage, **kw) def getImage(self, nImage): return self.getData(nImage) def getInfo(self, nImage, **kw): if nImage >= len(self._IFD): # update prior to raise an index error error self._updateIFD() # current = self._IFD[nImage] return self._readInfo(nImage) def _readInfo(self, nImage, close=True): if nImage in self._imageInfoCacheIndex: logger.debug("Reading info from cache") return self._imageInfoCache[self._imageInfoCacheIndex.index(nImage)] # read the header self.__makeSureFileIsOpen() tagIDList, fieldTypeList, 
nValuesList, valueOffsetList = self._parseImageFileDirectory(nImage) # rows and columns nColumns = valueOffsetList[tagIDList.index(TAG_NUMBER_OF_COLUMNS)] nRows = valueOffsetList[tagIDList.index(TAG_NUMBER_OF_ROWS)] # bits per sample idx = tagIDList.index(TAG_BITS_PER_SAMPLE) nBits = valueOffsetList[idx] if nValuesList[idx] != 1: # this happens with RGB and friends, nBits is not a single value nBits = self._readIFDEntry(TAG_BITS_PER_SAMPLE, tagIDList, fieldTypeList, nValuesList, valueOffsetList) if TAG_COLORMAP in tagIDList: idx = tagIDList.index(TAG_COLORMAP) tmpColormap = self._readIFDEntry(TAG_COLORMAP, tagIDList, fieldTypeList, nValuesList, valueOffsetList) if max(tmpColormap) > 255: tmpColormap = numpy.array(tmpColormap, dtype=numpy.uint16) tmpColormap = (tmpColormap / 256.).astype(numpy.uint8) else: tmpColormap = numpy.array(tmpColormap, dtype=numpy.uint8) tmpColormap.shape = 3, -1 colormap = numpy.zeros((tmpColormap.shape[-1], 3), tmpColormap.dtype) colormap[:, :] = tmpColormap.T tmpColormap = None else: colormap = None # sample format if TAG_SAMPLE_FORMAT in tagIDList: sampleFormat = valueOffsetList[tagIDList.index(TAG_SAMPLE_FORMAT)] else: # set to unknown sampleFormat = SAMPLE_FORMAT_VOID # compression compression = False compression_type = 1 if TAG_COMPRESSION in tagIDList: compression_type = valueOffsetList[tagIDList.index(TAG_COMPRESSION)] if compression_type == 1: compression = False else: compression = True # photometric interpretation interpretation = 1 if TAG_PHOTOMETRIC_INTERPRETATION in tagIDList: interpretation = valueOffsetList[tagIDList.index(TAG_PHOTOMETRIC_INTERPRETATION)] else: logger.debug("WARNING: Non standard TIFF. 
Photometric interpretation TAG missing") helpString = "" if sys.version > '2.6': helpString = eval('b""') if TAG_IMAGE_DESCRIPTION in tagIDList: imageDescription = self._readIFDEntry(TAG_IMAGE_DESCRIPTION, tagIDList, fieldTypeList, nValuesList, valueOffsetList) if type(imageDescription) in [type([1]), type((1,))]: imageDescription = helpString.join(imageDescription) else: imageDescription = "%d/%d" % (nImage + 1, len(self._IFD)) if sys.version < '3.0': defaultSoftware = "Unknown Software" else: defaultSoftware = bytes("Unknown Software", encoding='utf-8') if TAG_SOFTWARE in tagIDList: software = self._readIFDEntry(TAG_SOFTWARE, tagIDList, fieldTypeList, nValuesList, valueOffsetList) if type(software) in [type([1]), type((1,))]: software = helpString.join(software) else: software = defaultSoftware if software == defaultSoftware: try: if sys.version < '3.0': if imageDescription.upper().startswith("IMAGEJ"): software = imageDescription.split("=")[0] else: tmpString = imageDescription.decode() if tmpString.upper().startswith("IMAGEJ"): software = bytes(tmpString.split("=")[0], encoding='utf-8') except: pass if TAG_DATE in tagIDList: date = self._readIFDEntry(TAG_DATE, tagIDList, fieldTypeList, nValuesList, valueOffsetList) if type(date) in [type([1]), type((1,))]: date = helpString.join(date) else: date = "Unknown Date" stripOffsets = self._readIFDEntry(TAG_STRIP_OFFSETS, tagIDList, fieldTypeList, nValuesList, valueOffsetList) if TAG_ROWS_PER_STRIP in tagIDList: rowsPerStrip = self._readIFDEntry(TAG_ROWS_PER_STRIP, tagIDList, fieldTypeList, nValuesList, valueOffsetList)[0] else: rowsPerStrip = nRows logger.warning("Non standard TIFF. Rows per strip TAG missing") if TAG_STRIP_BYTE_COUNTS in tagIDList: stripByteCounts = self._readIFDEntry(TAG_STRIP_BYTE_COUNTS, tagIDList, fieldTypeList, nValuesList, valueOffsetList) else: logger.warning("Non standard TIFF. 
Strip byte counts TAG missing") if hasattr(nBits, 'index'): expectedSum = 0 for n in nBits: expectedSum += int(nRows * nColumns * n / 8) else: expectedSum = int(nRows * nColumns * nBits / 8) stripByteCounts = [expectedSum] if close: self.__makeSureFileIsClosed() if self._forceMonoOutput and (interpretation > 1): # color image but asked monochrome output nBits = 32 colormap = None sampleFormat = SAMPLE_FORMAT_FLOAT interpretation = 1 # we cannot rely on any cache in this case useInfoCache = False logger.debug("FORCED MONO") else: useInfoCache = True info = {} info["nRows"] = nRows info["nColumns"] = nColumns info["nBits"] = nBits info["compression"] = compression info["compression_type"] = compression_type info["imageDescription"] = imageDescription info["stripOffsets"] = stripOffsets # This contains the file offsets to the data positions info["rowsPerStrip"] = rowsPerStrip info["stripByteCounts"] = stripByteCounts # bytes in strip since I do not support compression info["software"] = software info["date"] = date info["colormap"] = colormap info["sampleFormat"] = sampleFormat info["photometricInterpretation"] = interpretation infoDict = {} if sys.version < '3.0': testString = 'PyMca' else: testString = eval('b"PyMca"') if software.startswith(testString): # str to make sure python 2.x sees it as string and not unicode if sys.version < '3.0': descriptionString = imageDescription else: descriptionString = str(imageDescription.decode()) # interpret the image description in terms of supplied # information at writing time items = descriptionString.split('=') for i in range(int(len(items) / 2)): key = "%s" % items[i * 2] # get rid of the \n at the end of the value value = "%s" % items[i * 2 + 1][:-1] infoDict[key] = value info['info'] = infoDict if (self._maxImageCacheLength > 0) and useInfoCache: self._imageInfoCacheIndex.insert(0, nImage) self._imageInfoCache.insert(0, info) if len(self._imageInfoCacheIndex) > self._maxImageCacheLength: self._imageInfoCacheIndex = 
self._imageInfoCacheIndex[:self._maxImageCacheLength] self._imageInfoCache = self._imageInfoCache[:self._maxImageCacheLength] return info def _readImage(self, nImage, **kw): logger.debug("Reading image %d", nImage) if 'close' in kw: close = kw['close'] else: close = True rowMin = kw.get('rowMin', None) rowMax = kw.get('rowMax', None) if nImage in self._imageDataCacheIndex: logger.debug("Reading image data from cache") return self._imageDataCache[self._imageDataCacheIndex.index(nImage)] self.__makeSureFileIsOpen() if self._forceMonoOutput: oldMono = True else: oldMono = False try: self._forceMonoOutput = False info = self._readInfo(nImage, close=False) self._forceMonoOutput = oldMono except: self._forceMonoOutput = oldMono raise compression = info['compression'] compression_type = info['compression_type'] if compression: if compression_type != 32773: raise IOError("Compressed TIFF images not supported except packbits") else: # PackBits compression logger.debug("Using PackBits compression") interpretation = info["photometricInterpretation"] if interpretation == 2: # RGB pass # raise IOError("RGB Image. Only grayscale images supported") elif interpretation == 3: # Palette Color Image pass # raise IOError("Palette-color Image. Only grayscale images supported") elif interpretation > 2: # Palette Color Image raise IOError("Only grayscale images supported") nRows = info["nRows"] nColumns = info["nColumns"] nBits = info["nBits"] colormap = info["colormap"] sampleFormat = info["sampleFormat"] if rowMin is None: rowMin = 0 if rowMax is None: rowMax = nRows - 1 if rowMin < 0: rowMin = nRows - rowMin if rowMax < 0: rowMax = nRows - rowMax if rowMax < rowMin: txt = "Max Row smaller than Min Row. 
Reverse selection not supported" raise NotImplemented(txt) if rowMin >= nRows: raise IndexError("Image only has %d rows" % nRows) if rowMax >= nRows: raise IndexError("Image only has %d rows" % nRows) if sampleFormat == SAMPLE_FORMAT_FLOAT: if nBits == 32: dtype = numpy.float32 elif nBits == 64: dtype = numpy.float64 else: raise ValueError("Unsupported number of bits for a float: %d" % nBits) elif sampleFormat in [SAMPLE_FORMAT_UINT, SAMPLE_FORMAT_VOID]: if nBits in [8, (8, 8, 8), [8, 8, 8]]: dtype = numpy.uint8 elif nBits in [16, (16, 16, 16), [16, 16, 16]]: dtype = numpy.uint16 elif nBits in [32, (32, 32, 32), [32, 32, 32]]: dtype = numpy.uint32 elif nBits in [64, (64, 64, 64), [64, 64, 64]]: dtype = numpy.uint64 else: raise ValueError("Unsupported number of bits for unsigned int: %s" % (nBits,)) elif sampleFormat == SAMPLE_FORMAT_INT: if nBits in [8, (8, 8, 8), [8, 8, 8]]: dtype = numpy.int8 elif nBits in [16, (16, 16, 16), [16, 16, 16]]: dtype = numpy.int16 elif nBits in [32, (32, 32, 32), [32, 32, 32]]: dtype = numpy.int32 elif nBits in [64, (64, 64, 64), [64, 64, 64]]: dtype = numpy.int64 else: raise ValueError("Unsupported number of bits for signed int: %s" % (nBits,)) else: raise ValueError("Unsupported combination. Bits = %s Format = %d" % (nBits, sampleFormat)) if hasattr(nBits, 'index'): image = numpy.zeros((nRows, nColumns, len(nBits)), dtype=dtype) elif colormap is not None: # should I use colormap dtype? 
image = numpy.zeros((nRows, nColumns, 3), dtype=dtype) else: image = numpy.zeros((nRows, nColumns), dtype=dtype) fd = self.fd st = self._structChar stripOffsets = info["stripOffsets"] # This contains the file offsets to the data positions rowsPerStrip = info["rowsPerStrip"] stripByteCounts = info["stripByteCounts"] # bytes in strip since I do not support compression rowStart = 0 if len(stripOffsets) == 1: bytesPerRow = int(stripByteCounts[0] / rowsPerStrip) if nRows == rowsPerStrip: actualBytesPerRow = int(image.nbytes / nRows) if actualBytesPerRow != bytesPerRow: logger.warning("Bogus StripByteCounts information") bytesPerRow = actualBytesPerRow fd.seek(stripOffsets[0] + rowMin * bytesPerRow) nBytes = (rowMax - rowMin + 1) * bytesPerRow if self._swap: readout = numpy.fromstring(fd.read(nBytes), dtype).byteswap() else: readout = numpy.fromstring(fd.read(nBytes), dtype) if hasattr(nBits, 'index'): readout.shape = -1, nColumns, len(nBits) elif info['colormap'] is not None: readout = colormap[readout] else: readout.shape = -1, nColumns image[rowMin:rowMax + 1, :] = readout else: for i in range(len(stripOffsets)): # the amount of rows nRowsToRead = rowsPerStrip rowEnd = int(min(rowStart + nRowsToRead, nRows)) if rowEnd < rowMin: rowStart += nRowsToRead continue if (rowStart > rowMax): break # we are in position fd.seek(stripOffsets[i]) # the amount of bytes to read nBytes = stripByteCounts[i] if compression_type == 32773: try: bufferBytes = bytes() except: # python 2.5 ... bufferBytes = "" # packBits readBytes = 0 # intermediate buffer tmpBuffer = fd.read(nBytes) while readBytes < nBytes: n = struct.unpack('b', tmpBuffer[readBytes:(readBytes + 1)])[0] readBytes += 1 if n >= 0: # should I prevent reading more than the # length of the chain? Let's python raise # the exception... 
bufferBytes += tmpBuffer[readBytes: readBytes + (n + 1)] readBytes += (n + 1) elif n > -128: bufferBytes += (-n + 1) * tmpBuffer[readBytes:(readBytes + 1)] readBytes += 1 else: # if read -128 ignore the byte continue if self._swap: readout = numpy.fromstring(bufferBytes, dtype).byteswap() else: readout = numpy.fromstring(bufferBytes, dtype) if hasattr(nBits, 'index'): readout.shape = -1, nColumns, len(nBits) elif info['colormap'] is not None: readout = colormap[readout] readout.shape = -1, nColumns, 3 else: readout.shape = -1, nColumns image[rowStart:rowEnd, :] = readout else: if 1: # use numpy if self._swap: readout = numpy.fromstring(fd.read(nBytes), dtype).byteswap() else: readout = numpy.fromstring(fd.read(nBytes), dtype) if hasattr(nBits, 'index'): readout.shape = -1, nColumns, len(nBits) elif colormap is not None: readout = colormap[readout] readout.shape = -1, nColumns, 3 else: readout.shape = -1, nColumns image[rowStart:rowEnd, :] = readout else: # using struct readout = numpy.array(struct.unpack(st + "%df" % int(nBytes / 4), fd.read(nBytes)), dtype=dtype) if hasattr(nBits, 'index'): readout.shape = -1, nColumns, len(nBits) elif colormap is not None: readout = colormap[readout] readout.shape = -1, nColumns, 3 else: readout.shape = -1, nColumns image[rowStart:rowEnd, :] = readout rowStart += nRowsToRead if close: self.__makeSureFileIsClosed() if len(image.shape) == 3: # color image if self._forceMonoOutput: # color image, convert to monochrome image = (image[:, :, 0] * 0.114 + image[:, :, 1] * 0.587 + image[:, :, 2] * 0.299).astype(numpy.float32) if (rowMin == 0) and (rowMax == (nRows - 1)): self._imageDataCacheIndex.insert(0, nImage) self._imageDataCache.insert(0, image) if len(self._imageDataCacheIndex) > self._maxImageCacheLength: self._imageDataCacheIndex = self._imageDataCacheIndex[:self._maxImageCacheLength] self._imageDataCache = self._imageDataCache[:self._maxImageCacheLength] return image def writeImage(self, image0, info=None, software=None, 
date=None): if software is None: software = 'PyMca.TiffIO' # if date is None: # date = time.ctime() self.__makeSureFileIsOpen() fd = self.fd # prior to do anything, perform some tests if not len(image0.shape): raise ValueError("Empty image") if len(image0.shape) == 1: # get a different view image = image0[:] image.shape = 1, -1 else: image = image0 if image.dtype == numpy.float64: image = image.astype(numpy.float32) fd.seek(0) mode = fd.mode name = fd.name if 'w' in mode: # we have to overwrite the file self.__makeSureFileIsClosed() fd = None if os.path.exists(name): os.remove(name) fd = open(name, mode='wb+') self._initEmptyFile(fd) self.fd = fd # read the file size self.__makeSureFileIsOpen() fd = self.fd fd.seek(0, os.SEEK_END) endOfFile = fd.tell() if fd.tell() == 0: self._initEmptyFile(fd) fd.seek(0, os.SEEK_END) endOfFile = fd.tell() # init internal variables self._initInternalVariables(fd) st = self._structChar # get the image file directories nImages = self.getImageFileDirectories() logger.debug("File contains %d images", nImages) if nImages == 0: fd.seek(4) fmt = st + 'I' fd.write(struct.pack(fmt, endOfFile)) else: fd.seek(self._IFD[-1]) fmt = st + 'H' numberOfDirectoryEntries = struct.unpack(fmt, fd.read(struct.calcsize(fmt)))[0] fmt = st + 'I' pos = self._IFD[-1] + 2 + 12 * numberOfDirectoryEntries fd.seek(pos) fmt = st + 'I' fd.write(struct.pack(fmt, endOfFile)) fd.flush() # and we can write at the end of the file, find out the file length fd.seek(0, os.SEEK_END) # get the description information from the input information if info is None: description = info else: description = "%s" % "" for key in info.keys(): description += "%s=%s\n" % (key, info[key]) # get the image file directory outputIFD = self._getOutputIFD(image, description=description, software=software, date=date) # write the new IFD fd.write(outputIFD) # write the image if self._swap: fd.write(image.byteswap().tostring()) else: fd.write(image.tostring()) fd.flush() self.fd = fd 
self.__makeSureFileIsClosed() def _initEmptyFile(self, fd=None): if fd is None: fd = self.fd if sys.byteorder == "little": order = "II" # intel, little endian fileOrder = "little" self._structChar = '<' else: order = "MM" # motorola, high endian fileOrder = "big" self._structChar = '>' st = self._structChar if fileOrder == sys.byteorder: self._swap = False else: self._swap = True fd.seek(0) if sys.version < '3.0': fd.write(struct.pack(st + '2s', order)) fd.write(struct.pack(st + 'H', 42)) fd.write(struct.pack(st + 'I', 0)) else: fd.write(struct.pack(st + '2s', bytes(order, 'utf-8'))) fd.write(struct.pack(st + 'H', 42)) fd.write(struct.pack(st + 'I', 0)) fd.flush() def _getOutputIFD(self, image, description=None, software=None, date=None): # the tags have to be in order # the very minimum is # 256:"NumberOfColumns", # S or L ImageWidth # 257:"NumberOfRows", # S or L ImageHeight # 258:"BitsPerSample", # S Number of bits per component # 259:"Compression", # SHORT (1 - NoCompression, ... # 262:"PhotometricInterpretation", # SHORT (0 - WhiteIsZero, 1 -BlackIsZero, 2 - RGB, 3 - Palette color # 270:"ImageDescription", # ASCII # 273:"StripOffsets", # S or L, for each strip, the byte offset of the strip # 277:"SamplesPerPixel", # SHORT (>=3) only for RGB images # 278:"RowsPerStrip", # S or L, number of rows in each back may be not for the last # 279:"StripByteCounts", # S or L, The number of bytes in the strip AFTER any compression # 305:"Software", # ASCII # 306:"Date", # ASCII # 339:"SampleFormat", # SHORT Interpretation of data in each pixel nDirectoryEntries = 9 imageDescription = None if description is not None: descriptionLength = len(description) while descriptionLength < 4: description = description + " " descriptionLength = len(description) if sys.version >= '3.0': description = bytes(description, 'utf-8') elif isinstance(description, str): try: description = description.decode('utf-8') except UnicodeDecodeError: try: description = description.decode('latin-1') 
except UnicodeDecodeError: description = "%s" % description if sys.version > '2.6': description = description.encode('utf-8', errors="ignore") description = "%s" % description descriptionLength = len(description) imageDescription = struct.pack("%ds" % descriptionLength, description) nDirectoryEntries += 1 # software if software is not None: softwareLength = len(software) while softwareLength < 4: software = software + " " softwareLength = len(software) if sys.version >= '3.0': software = bytes(software, 'utf-8') softwarePackedString = struct.pack("%ds" % softwareLength, software) nDirectoryEntries += 1 else: softwareLength = 0 if date is not None: dateLength = len(date) if sys.version >= '3.0': date = bytes(date, 'utf-8') datePackedString = struct.pack("%ds" % dateLength, date) dateLength = len(datePackedString) nDirectoryEntries += 1 else: dateLength = 0 if len(image.shape) == 2: nRows, nColumns = image.shape nChannels = 1 elif len(image.shape) == 3: nRows, nColumns, nChannels = image.shape else: raise RuntimeError("Image does not have the right shape") dtype = image.dtype bitsPerSample = int(dtype.str[-1]) * 8 # only uncompressed data compression = 1 # interpretation, black is zero if nChannels == 1: interpretation = 1 bitsPerSampleLength = 0 elif nChannels == 3: interpretation = 2 bitsPerSampleLength = 3 * 2 # To store 3 shorts nDirectoryEntries += 1 # For SamplesPerPixel else: raise RuntimeError( "Image with %d color channel(s) not supported" % nChannels) # image description if imageDescription is not None: descriptionLength = len(imageDescription) else: descriptionLength = 0 # strip offsets # we are putting them after the directory and the directory is # at the end of the file self.fd.seek(0, os.SEEK_END) endOfFile = self.fd.tell() if endOfFile == 0: # empty file endOfFile = 8 # rows per strip if ALLOW_MULTIPLE_STRIPS: # try to segment the image in several pieces if not (nRows % 4): rowsPerStrip = int(nRows / 4) elif not (nRows % 10): rowsPerStrip = int(nRows 
/ 10) elif not (nRows % 8): rowsPerStrip = int(nRows / 8) elif not (nRows % 4): rowsPerStrip = int(nRows / 4) elif not (nRows % 2): rowsPerStrip = int(nRows / 2) else: rowsPerStrip = nRows else: rowsPerStrip = nRows # stripByteCounts stripByteCounts = int(nColumns * rowsPerStrip * bitsPerSample * nChannels / 8) if descriptionLength > 4: stripOffsets0 = endOfFile + dateLength + descriptionLength + \ 2 + 12 * nDirectoryEntries + 4 else: stripOffsets0 = endOfFile + dateLength + \ 2 + 12 * nDirectoryEntries + 4 if softwareLength > 4: stripOffsets0 += softwareLength stripOffsets0 += bitsPerSampleLength stripOffsets = [stripOffsets0] stripOffsetsLength = 0 stripOffsetsString = None st = self._structChar if rowsPerStrip != nRows: nStripOffsets = int(nRows / rowsPerStrip) fmt = st + 'I' stripOffsetsLength = struct.calcsize(fmt) * nStripOffsets stripOffsets0 += stripOffsetsLength # the length for the stripByteCounts will be the same stripOffsets0 += stripOffsetsLength stripOffsets = [] for i in range(nStripOffsets): value = stripOffsets0 + i * stripByteCounts stripOffsets.append(value) if i == 0: stripOffsetsString = struct.pack(fmt, value) stripByteCountsString = struct.pack(fmt, stripByteCounts) else: stripOffsetsString += struct.pack(fmt, value) stripByteCountsString += struct.pack(fmt, stripByteCounts) logger.debug("IMAGE WILL START AT %d", stripOffsets[0]) # sample format if dtype in [numpy.float32, numpy.float64] or\ dtype.str[-2] == 'f': sampleFormat = SAMPLE_FORMAT_FLOAT elif dtype in [numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64]: sampleFormat = SAMPLE_FORMAT_UINT elif dtype in [numpy.int8, numpy.int16, numpy.int32, numpy.int64]: sampleFormat = SAMPLE_FORMAT_INT else: raise ValueError("Unsupported data type %s" % dtype) info = {} info["nColumns"] = nColumns info["nRows"] = nRows info["nBits"] = bitsPerSample info["compression"] = compression info["photometricInterpretation"] = interpretation info["stripOffsets"] = stripOffsets if interpretation == 2: 
info["samplesPerPixel"] = 3 # No support for extra samples info["rowsPerStrip"] = rowsPerStrip info["stripByteCounts"] = stripByteCounts info["date"] = date info["sampleFormat"] = sampleFormat outputIFD = "" if sys.version > '2.6': outputIFD = eval('b""') fmt = st + "H" outputIFD += struct.pack(fmt, nDirectoryEntries) fmt = st + "HHII" outputIFD += struct.pack(fmt, TAG_NUMBER_OF_COLUMNS, FIELD_TYPE_OUT['I'], 1, info["nColumns"]) outputIFD += struct.pack(fmt, TAG_NUMBER_OF_ROWS, FIELD_TYPE_OUT['I'], 1, info["nRows"]) if info["photometricInterpretation"] == 1: fmt = st + 'HHIHH' outputIFD += struct.pack(fmt, TAG_BITS_PER_SAMPLE, FIELD_TYPE_OUT['H'], 1, info["nBits"], 0) elif info["photometricInterpretation"] == 2: fmt = st + 'HHII' outputIFD += struct.pack(fmt, TAG_BITS_PER_SAMPLE, FIELD_TYPE_OUT['H'], 3, info["stripOffsets"][0] - 2 * stripOffsetsLength - descriptionLength - dateLength - softwareLength - bitsPerSampleLength) else: raise RuntimeError("Unsupported photometric interpretation") fmt = st + 'HHIHH' outputIFD += struct.pack(fmt, TAG_COMPRESSION, FIELD_TYPE_OUT['H'], 1, info["compression"], 0) fmt = st + 'HHIHH' outputIFD += struct.pack(fmt, TAG_PHOTOMETRIC_INTERPRETATION, FIELD_TYPE_OUT['H'], 1, info["photometricInterpretation"], 0) if imageDescription is not None: descriptionLength = len(imageDescription) if descriptionLength > 4: fmt = st + 'HHII' outputIFD += struct.pack(fmt, TAG_IMAGE_DESCRIPTION, FIELD_TYPE_OUT['s'], descriptionLength, info["stripOffsets"][0] - 2 * stripOffsetsLength - descriptionLength) else: # it has to have length 4 fmt = st + 'HHI%ds' % descriptionLength outputIFD += struct.pack(fmt, TAG_IMAGE_DESCRIPTION, FIELD_TYPE_OUT['s'], descriptionLength, description) if len(stripOffsets) == 1: fmt = st + 'HHII' outputIFD += struct.pack(fmt, TAG_STRIP_OFFSETS, FIELD_TYPE_OUT['I'], 1, info["stripOffsets"][0]) else: fmt = st + 'HHII' outputIFD += struct.pack(fmt, TAG_STRIP_OFFSETS, FIELD_TYPE_OUT['I'], len(stripOffsets), 
info["stripOffsets"][0] - 2 * stripOffsetsLength) if info["photometricInterpretation"] == 2: fmt = st + 'HHIHH' outputIFD += struct.pack(fmt, TAG_SAMPLES_PER_PIXEL, FIELD_TYPE_OUT['H'], 1, info["samplesPerPixel"], 0) fmt = st + 'HHII' outputIFD += struct.pack(fmt, TAG_ROWS_PER_STRIP, FIELD_TYPE_OUT['I'], 1, info["rowsPerStrip"]) if len(stripOffsets) == 1: fmt = st + 'HHII' outputIFD += struct.pack(fmt, TAG_STRIP_BYTE_COUNTS, FIELD_TYPE_OUT['I'], 1, info["stripByteCounts"]) else: fmt = st + 'HHII' outputIFD += struct.pack(fmt, TAG_STRIP_BYTE_COUNTS, FIELD_TYPE_OUT['I'], len(stripOffsets), info["stripOffsets"][0] - stripOffsetsLength) if software is not None: if softwareLength > 4: fmt = st + 'HHII' outputIFD += struct.pack(fmt, TAG_SOFTWARE, FIELD_TYPE_OUT['s'], softwareLength, info["stripOffsets"][0] - 2 * stripOffsetsLength - descriptionLength - softwareLength - dateLength) else: # it has to have length 4 fmt = st + 'HHI%ds' % softwareLength outputIFD += struct.pack(fmt, TAG_SOFTWARE, FIELD_TYPE_OUT['s'], softwareLength, softwarePackedString) if date is not None: fmt = st + 'HHII' outputIFD += struct.pack(fmt, TAG_DATE, FIELD_TYPE_OUT['s'], dateLength, info["stripOffsets"][0] - 2 * stripOffsetsLength - descriptionLength - dateLength) fmt = st + 'HHIHH' outputIFD += struct.pack(fmt, TAG_SAMPLE_FORMAT, FIELD_TYPE_OUT['H'], 1, info["sampleFormat"], 0) fmt = st + 'I' outputIFD += struct.pack(fmt, 0) if info["photometricInterpretation"] == 2: outputIFD += struct.pack('HHH', info["nBits"], info["nBits"], info["nBits"]) if softwareLength > 4: outputIFD += softwarePackedString if date is not None: outputIFD += datePackedString if imageDescription is not None: if descriptionLength > 4: outputIFD += imageDescription if stripOffsetsString is not None: outputIFD += stripOffsetsString outputIFD += stripByteCountsString return outputIFD if __name__ == "__main__": filename = sys.argv[1] dtype = numpy.uint16 if not os.path.exists(filename): print("Testing file creation") tif = 
TiffIO(filename, mode='wb+') data = numpy.arange(10000).astype(dtype) data.shape = 100, 100 tif.writeImage(data, info={'Title': '1st'}) tif = None if os.path.exists(filename): print("Testing image appending") tif = TiffIO(filename, mode='rb+') tif.writeImage((data * 2).astype(dtype), info={'Title': '2nd'}) tif = None tif = TiffIO(filename) print("Number of images = %d" % tif.getNumberOfImages()) for i in range(tif.getNumberOfImages()): info = tif.getInfo(i) for key in info: if key not in ["colormap"]: print("%s = %s" % (key, info[key])) elif info['colormap'] is not None: print("RED %s = %s" % (key, info[key][0:10, 0])) print("GREEN %s = %s" % (key, info[key][0:10, 1])) print("BLUE %s = %s" % (key, info[key][0:10, 2])) data = tif.getImage(i)[0, 0:10] print("data [0, 0:10] = ", data) fabio-0.6.0/fabio/fit2dspreadsheetimage.py0000644001611600070440000000633313227357030021614 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # """ Read the fit2d ascii image output + Jon Wright, ESRF """ # Get ready for python3: from __future__ import absolute_import, print_function, with_statement, division import numpy from .fabioimage import FabioImage class Fit2dSpreadsheetImage(FabioImage): """ Read a fit2d ascii format """ DESCRIPTION = "Fit2d spreadsheet ascii file format" DEFAULT_EXTENSIONS = ["spr"] def _readheader(self, infile): """ TODO : test for minimal attributes? """ line = infile.readline() items = line.split() xdim = int(items[0]) ydim = int(items[1]) self.header['title'] = line self.header['Dim_1'] = xdim self.header['Dim_2'] = ydim def read(self, fname, frame=None): """ Read in header into self.header and the data into self.data """ self.header = self.check_header() self.resetvals() infile = self._open(fname) self._readheader(infile) # Compute image size try: self.dim1 = int(self.header['Dim_1']) self.dim2 = int(self.header['Dim_2']) except (ValueError, KeyError): raise IOError("file %s is corrupt, cannot read it" % str(fname)) bytecode = numpy.float32 self.bpp = len(numpy.array(0, bytecode).tostring()) # now read the data into the array try: vals = [] for line in infile.readlines(): try: vals.append([float(x) for x in line.split()]) except: pass self.data = numpy.array(vals).astype(bytecode) assert self.data.shape == (self.dim2, self.dim1) except: raise IOError("Error reading ascii") self.resetvals() # ensure the PIL image is reset self.pilimage = None return self fit2dspreadsheetimage = Fit2dSpreadsheetImage fabio-0.6.0/fabio/binaryimage.py0000644001611600070440000001131313227357030017632 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright 
(C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """ Authors: Gael Goret, Jerome Kieffer, ESRF, France Emails: gael.goret@esrf.fr, jerome.kieffer@esrf.fr Brian Richard Pauw Binary files images are simple none-compressed 2D images only defined by their : data-type, dimensions, byte order and offset This simple library has been made for manipulating exotic/unknown files format. """ # Get ready for python3: from __future__ import with_statement, print_function __authors__ = ["Gaël Goret", "Jérôme Kieffer", "Brian Pauw"] __contact__ = "gael.goret@esrf.fr" __license__ = "GPLv3+" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" __version__ = "17/10/2012" from .fabioimage import FabioImage import numpy import logging logger = logging.getLogger(__name__) class BinaryImage(FabioImage): """ This simple library has been made for manipulating exotic/unknown files format. Binary files images are simple none-compressed 2D images only defined by their: data-type, dimensions, byte order and offset if offset is set to a negative value, the image is read using the last data but n data in the file, skipping any header. 
""" DESCRIPTION = "Binary format (none-compressed 2D images)" DEFAULT_EXTENSIONS = ["bin"] def __init__(self, *args, **kwargs): FabioImage.__init__(self, *args, **kwargs) @staticmethod def swap_needed(endian): """ Decide if we need to byteswap """ if (endian == '<' and numpy.little_endian) or (endian == '>' and not numpy.little_endian): return False if (endian == '>' and numpy.little_endian) or (endian == '<' and not numpy.little_endian): return True def read(self, fname, dim1, dim2, offset=0, bytecode="int32", endian="<"): """ Read a binary image :param str fname: file name :param int dim1: image dimensions (Fast index) :param int dim2: image dimensions (Slow index) :param int offset: starting position of the data-block. If negative, starts at the end. :param bytecode: can be "int8","int16","int32","int64","uint8","uint16","uint32","uint64","float32","float64",... :param endian: among short or long endian ("<" or ">") """ self.filename = fname self.dim1 = dim1 self.dim2 = dim2 self.bytecode = bytecode f = open(self.filename, "rb") dims = [dim2, dim1] bpp = len(numpy.array(0, bytecode).tostring()) size = dims[0] * dims[1] * bpp if offset >= 0: f.seek(offset) else: try: f.seek(-size + offset + 1, 2) # seek from EOF backwards except IOError: logger.warning('expected datablock too large, please check bytecode settings: {}'.format(bytecode)) except: logger.error('Uncommon error encountered when reading file') rawData = f.read(size) if self.swap_needed(endian): data = numpy.fromstring(rawData, bytecode).byteswap().reshape(tuple(dims)) else: data = numpy.fromstring(rawData, bytecode).reshape(tuple(dims)) self.data = data return self def estimate_offset_value(self, fname, dim1, dim2, bytecode="int32"): "Estimates the size of a file" with open(fname, "rb") as f: bpp = len(numpy.array(0, bytecode).tostring()) size = dim1 * dim2 * bpp totsize = len(f.read()) logger.info('total size (bytes): %s', totsize) logger.info('expected data size given parameters (bytes): %s', size) 
logger.info('estimation of the offset value (bytes): %s', totsize - size) def write(self, fname): with open(fname, mode="wb") as outfile: outfile.write(self.data.tostring()) binaryimage = BinaryImage fabio-0.6.0/fabio/hdf5image.py0000644001611600070440000001272213227357030017201 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: FabIO X-ray image reader # # Copyright (C) 2010-2016 European Synchrotron Radiation Facility # Grenoble, France # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
# """HDF5 image for FabIO Authors: Jerome Kieffer email: Jerome.Kieffer@terre-adelie.org Specifications: input should being the form: filename::path Only supports ndim=2 or 3 (exposed as a stack of images """ # Get ready for python3: from __future__ import with_statement, print_function, division __authors__ = ["Jérôme Kieffer"] __contact__ = "Jerome.Kieffer@terre-adelie.org" __license__ = "MIT" __copyright__ = "Jérôme Kieffer" __date__ = "24/07/2017" import logging import os import posixpath import sys from .fabioimage import FabioImage logger = logging.getLogger(__name__) if sys.version_info[0] < 3: bytes = str try: import h5py except ImportError: h5py = None from .fabioutils import previous_filename, next_filename class Hdf5Image(FabioImage): """ FabIO image class for Images from an HDF file filename::dataset """ DESCRIPTION = "Hierarchical Data Format HDF5 flat reader" DEFAULT_EXTENSIONS = ["h5"] def __init__(self, *arg, **kwargs): """ Generic constructor """ if not h5py: raise RuntimeError("fabio.Hdf5Image cannot be used without h5py. 
Please install h5py and restart") FabioImage.__init__(self, *arg, **kwargs) self.hdf5 = None self.dataset = None def read(self, fname, frame=None): """ try to read image :param fname: filename::datasetpath """ self.resetvals() if "::" not in fname: err = "the '::' separator in mandatory for HDF5 container, absent in %s" % fname logger.error(err) raise RuntimeError(err) filename, datapath = fname.split("::", 1) self.filename = filename if os.path.isfile(self.filename): self.hdf5 = h5py.File(self.filename, "r") else: error = "No such file or directory: %s" % self.filename logger.error(error) raise RuntimeError(error) try: self.dataset = self.hdf5[datapath] except Exception as err: logger.error("No such datapath %s in %s, %s", datapath, filename, err) raise if isinstance(self.dataset, h5py.Group) and ("data" in self.dataset): datapath = posixpath.join(datapath, "data") logger.warning("The actual dataset is ") self.dataset = self.dataset["data"] # ndim does not exist for external links ? ndim = len(self.dataset.shape) if ndim == 3: self.nframes = self.dataset.shape[0] if frame is not None: self.currentframe = int(frame) else: self.currentframe = 0 self.data = self.dataset[self.currentframe, :, :] elif ndim == 2: self.data = self.dataset[:, :] else: err = "Only 2D and 3D datasets are supported by FabIO, here %sD" % self.dataset.ndim logger.error(err) raise RuntimeError(err) return self def getframe(self, num): """ Returns a frame as a new FabioImage object :param num: frame number """ if num < 0 or num > self.nframes: raise RuntimeError("Requested frame number %i is out of range [0, %i[ " % (num, self.nframes)) # Do a deep copy of the header to make a new one frame = self.__class__(header=self.header) frame.hdf5 = self.hdf5 frame.dataset = self.dataset frame.filename = self.filename frame.nframes = self.nframes frame.data = self.dataset[num, :, :] frame.currentframe = num return frame def next(self): """ Get the next image in a series as a fabio image """ if 
self.currentframe < (self.nframes - 1): return self.getframe(self.currentframe + 1) else: newobj = Hdf5Image() newobj.read(next_filename(self.filename)) return newobj def previous(self): """ Get the previous image in a series as a fabio image """ if self.currentframe > 0: return self.getframe(self.currentframe - 1) else: newobj = Hdf5Image() newobj.read(previous_filename(self.filename)) return newobj hdf5image = Hdf5Image fabio-0.6.0/fabio/jpeg2kimage.py0000644001611600070440000001155013227357030017533 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: FabIO X-ray image reader # # Copyright (C) 2010-2016 European Synchrotron Radiation Facility # Grenoble, France # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # """ FabIO class for dealing with JPEG 2000 images. 
""" from __future__ import with_statement, print_function, division __authors__ = ["Valentin Valls"] __date__ = "28/07/2017" __license__ = "MIT" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" __status__ = "stable" import logging logger = logging.getLogger(__name__) try: import PIL except ImportError: PIL = None try: import glymur except ImportError: glymur = None from .fabioimage import FabioImage from .fabioutils import OrderedDict from .utils import pilutils class Jpeg2KImage(FabioImage): """ Images in JPEG 2000 format. It uses PIL or glymur libraries. """ DESCRIPTION = "JPEG 2000 format" DEFAULT_EXTENSIONS = ["jp2", "jpx", "j2k", "jpf", "jpg2"] _need_a_seek_to_read = True def __init__(self, *args, **kwds): """ Tifimage constructor adds an nbits member attribute """ self.nbits = None FabioImage.__init__(self, *args, **kwds) self.lib = "" self._decoders = OrderedDict() if PIL is not None: self._decoders["PIL"] = self._readWithPil if glymur is not None: self._decoders["glymur"] = self._readWithGlymur def _readWithPil(self, filename, infile): """Read data using PIL""" self.pilimage = PIL.Image.open(infile) data = pilutils.get_numpy_array(self.pilimage) self.data = data if self.pilimage and self.pilimage.info: for k, v in self.pilimage.info.items(): self.header[k] = v print(self.data) def _loadGlymurImage(self, filename, infile): """ Hack to use Glymur with Python file object This code was tested with all release 0.8.x """ # image = glymur.Jp2k(filename) # inject a shape to avoid calling the read function if not glymur.__version__.startswith("0.8."): raise IOError("Glymur version %s is not supported" % glymur.__version__) image = glymur.Jp2k(filename=filename, shape=(1, 1)) # Move to the end of the file to know the size infile.seek(0, 2) length = infile.tell() infile.seek(0) # initialize what it should already be done image.length = length image._shape = None # It is not the only one format supported by Glymur # but it is a simplification 
image._codec_format = glymur.lib.openjp2.CODEC_JP2 # parse the data image.box = image.parse_superbox(infile) try: image._validate() except Exception: logger.debug("Backtrace", exc_info=True) raise IOError("File %s is not a valid format" % filename) # Now the image can be used normaly return image def _readWithGlymur(self, filename, infile): """Read data using Glymur""" image = self._loadGlymurImage(filename, infile) self.data = image.read() def read(self, filename, frame=None): infile = self._open(filename, "rb") self.data = None for name, read in self._decoders.items(): try: infile.seek(0) read(filename, infile) self.lib = name break except IOError as e: self.data = None self.header = OrderedDict() logger.debug("Error while using %s library: %s" % (name, e), exc_info=True) pass if self.data is None: infile.seek(0) raise IOError("No decoder available for the file %s." % filename) self.resetvals() return self jpeg2kimage = Jpeg2KImage fabio-0.6.0/fabio/fabioutils.py0000644001611600070440000005422713227357030017517 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # """General purpose utilities functions for fabio """ from __future__ import absolute_import, print_function, with_statement, division __author__ = "Jérôme Kieffer" __contact__ = "Jerome.Kieffer@ESRF.eu" __license__ = "MIT" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" __date__ = "29/09/2017" __status__ = "stable" __docformat__ = 'restructuredtext' import re import os import logging import sys import json logger = logging.getLogger(__name__) from .third_party.ordereddict import OrderedDict as _OrderedDict from .third_party import six if six.PY2: bytes_ = str FileIO = file StringTypes = (str, unicode) to_str = str else: bytes_ = bytes StringTypes = (str, bytes) unicode = str from io import FileIO to_str = lambda s: str(s, "ASCII") from .compression import bz2, gzip, COMPRESSORS import traceback from math import ceil if sys.version_info < (3, 3): from threading import _Semaphore as _Semaphore else: from threading import Semaphore as _Semaphore dictAscii = {None: [chr(i) for i in range(32, 127)]} def deprecated(func): """ used to deprecate a function/method: prints a lot of warning messages to enforce the modification of the code """ def wrapper(*arg, **kw): """ decorator that deprecates the use of a function """ logger.warning("%s is Deprecated !!! 
def pad(mystr, pattern=" ", size=80):
    """Right-pad a string with a pattern up to a multiple of ``size``.

    :param mystr: input string
    :param pattern: the filling pattern
    :param size: the size of the block
    :return: the padded string, whose length is a multiple of ``size``
    """
    size = int(size)
    target = ((len(mystr) + size - 1) // size) * size
    if len(pattern) == 1:
        # single-character pattern: the builtin ljust does the job
        return mystr.ljust(target, pattern)
    repeats = int(ceil(float(target - len(mystr)) / len(pattern)))
    return (mystr + pattern * repeats)[:target]


def getnum(name):
    """Try to figure out the file number embedded in a filename.

    The number is guessed starting from the back of the name.

    :param name: file name to parse
    :return: the integer number, or None when no number is found
    """
    _stem, digits, _trailer = numstem(name)
    try:
        return int(digits)
    except ValueError:
        return None
""" self.stem = stem self.num = num self.format = format_ self.extension = extension self.digits = digits self.postnum = postnum self.directory = directory self.compressed = None if filename is not None: self.deconstruct_filename(filename) def str(self): """ Return a string representation """ fmt = "stem %s, num %s format %s extension %s " + \ "postnum = %s digits %s dir %s" attrs = [self.stem, self.num, self.format, self.extension, self.postnum, self.digits, self.directory] return fmt % tuple([str(x) for x in attrs]) __repr__ = str def tostring(self): """ convert yourself to a string """ name = self.stem if self.digits is not None and self.num is not None: fmt = "%0" + str(self.digits) + "d" name += fmt % self.num if self.postnum is not None: name += self.postnum if self.extension is not None: name += self.extension if self.directory is not None: name = os.path.join(self.directory, name) return name def deconstruct_filename(self, filename): """ Break up a filename to get image type and number """ from . import fabioformats direc, name = os.path.split(filename) direc = direc or None parts = name.split(".") compressed = False stem = parts[0] extn = "" postnum = "" ndigit = 4 num = None typ = None if parts[-1].lower() in ["gz", "bz2"]: extn = "." + parts[-1] parts = parts[:-1] compressed = True codec_classes = fabioformats.get_classes_from_extension(parts[-1]) if len(codec_classes) > 0: typ = [] for codec in codec_classes: name = codec.codec_name() if name.endswith("image"): name = name[:-5] typ.append(name) extn = "." 
def numstem(name):
    """Split ``name`` into (stem, digit-string, trailer).

    Matches up to 9 digits (optionally signed) immediately before the
    trailing non-digit part of the string.

    :param name: string to split
    :return: list [stem, number-string, trailer]
    """
    reg = re.compile(r"^(.*?)(-?[0-9]{0,9})(\D*)$")
    try:
        stem, digits, trailer = reg.match(name).groups()
        if len(stem) == 0 and len(digits) == 0:
            # Hack for file without number: everything went to the trailer
            return [trailer, '', '']
        return [stem, digits, trailer]
    except AttributeError:
        # no match at all
        return [name, "", ""]


# @deprecated
def deconstruct_filename(filename):
    """Function for backward compatibility. Deprecated"""
    return FilenameObject(filename=filename)


def construct_filename(filename, frame=None):
    "Try to construct the filename for a given frame"
    fobj = FilenameObject(filename=filename)
    if frame is not None:
        fobj.num = frame
    return fobj.tostring()


def next_filename(name, padding=True):
    """Increment the file number of ``name`` by one."""
    fobj = FilenameObject(filename=name)
    fobj.num += 1
    if not padding:
        fobj.digits = 0
    return fobj.tostring()


def previous_filename(name, padding=True):
    """Decrement the file number of ``name`` by one."""
    fobj = FilenameObject(filename=name)
    fobj.num -= 1
    if not padding:
        fobj.digits = 0
    return fobj.tostring()


def jump_filename(name, num, padding=True):
    """Replace the file number of ``name`` with ``num``."""
    fobj = FilenameObject(filename=name)
    fobj.num = num
    if not padding:
        fobj.digits = 0
    return fobj.tostring()


def extract_filenumber(name):
    """Extract the file number of ``name`` (may be None)."""
    fobj = FilenameObject(filename=name)
    return fobj.num


def isAscii(name, listExcluded=None):
    """
    :param name: string to check
    :param listExcluded: list of char or string excluded.
    :return: True or False whether name is pure ascii or not
    """
    try:
        name.encode("ASCII")
    except UnicodeError:
        # Bug fix: Python 3 raises UnicodeEncodeError here while the old
        # code only caught UnicodeDecodeError (the Python 2 behaviour),
        # letting non-ASCII input escape uncaught. UnicodeError is the
        # common parent of both.
        return False
    if listExcluded:
        return not any(bad in name for bad in listExcluded)
    return True
def toAscii(name, excluded=None):
    """
    :param name: string to check
    :param excluded: tuple of char or string excluded (not list: they are mutable).
    :return: the name with all non valid char removed
    """
    # build (and cache in dictAscii) the allowed-character table for this
    # exclusion set
    if excluded in dictAscii:
        allowed = dictAscii[excluded]
    else:
        allowed = dictAscii[None][:]
        for char in excluded:
            if char in allowed:
                allowed.remove(char)
            else:
                logger.error("toAscii: %s not in ascii table" % char)
        dictAscii[excluded] = allowed
    return "".join(char for char in str(name) if char in allowed)


def nice_int(s):
    """Workaround that int('1.0') raises an exception

    :param s: string to be converted to integer
    """
    try:
        return int(s)
    except ValueError:
        # tolerate float-looking strings such as "1.0"
        return int(float(s))


class BytesIO(six.BytesIO):
    """Interface providing the ``name``, ``mode`` and ``size`` properties
    on top of a BytesIO (BugFix for MacOSX mainly)."""

    def __init__(self, data, fname=None, mode="r"):
        six.BytesIO.__init__(self, data)
        if "closed" not in dir(self):
            self.closed = False
        self.name = "fabioStream" if fname is None else fname
        self.mode = mode
        self.lock = _Semaphore()
        self.__size = None

    def getSize(self):
        # lazily measured once, then cached
        if self.__size is None:
            logger.debug("Measuring size of %s" % self.name)
            with self.lock:
                current = self.tell()
                self.seek(0, os.SEEK_END)
                self.__size = self.tell()
                self.seek(current)
        return self.__size

    def setSize(self, size):
        self.__size = size

    size = property(getSize, setSize)


class File(FileIO):
    """Wrapper for "file" with locking."""

    def __init__(self, name, mode="rb", buffering=0, temporary=False):
        """Open a file, optionally destroying it at close time.

        The mode can be 'r', 'w' or 'a' for reading (default), writing or
        appending; add 'b' for binary files and '+' to allow simultaneous
        reading and writing.  The buffering argument is only honoured on
        Python 2.

        :param temporary: if True, destroy file at close.
        """
        if six.PY2:
            FileIO.__init__(self, name, mode, buffering)
        else:
            # for python3 we drop buffering
            FileIO.__init__(self, name, mode)
        self.lock = _Semaphore()
        self.__size = None
        self.__temporary = temporary

    def __del__(self):
        """Explicit close at deletion"""
        if hasattr(self, "closed") and not self.closed:
            self.close()

    def close(self):
        fname = self.name
        FileIO.close(self)
        if self.__temporary:
            try:
                os.unlink(fname)
            except Exception as err:
                logger.error("Unable to remove %s: %s" % (fname, err))
                raise(err)

    def getSize(self):
        # lazily measured once, then cached
        if self.__size is None:
            logger.debug("Measuring size of %s" % self.name)
            with self.lock:
                current = self.tell()
                self.seek(0, os.SEEK_END)
                self.__size = self.tell()
                self.seek(current)
        return self.__size

    def setSize(self, size):
        self.__size = size

    size = property(getSize, setSize)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        """Close the file."""
        return FileIO.close(self)


class UnknownCompressedFile(File):
    """Fallback "File" with locking, used when no decompressor is found."""

    def __init__(self, name, mode="rb", buffering=0):
        logger.warning("No decompressor found for this type of file (are gzip anf bz2 installed ???")
        File.__init__(self, name, mode, buffering)

    def __del__(self):
        """Explicit close at deletion"""
        if hasattr(self, "closed") and not self.closed:
            self.close()
At least one of fileobj and filename must be given a non-trivial value. The new class instance is based on fileobj, which can be a regular file, a StringIO object, or any other object which simulates a file. It defaults to None, in which case filename is opened to provide a file object. When fileobj is not None, the filename argument is only used to be included in the gzip file header, which may includes the original filename of the uncompressed file. It defaults to the filename of fileobj, if discernible; otherwise, it defaults to the empty string, and in this case the original filename is not included in the header. The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb', depending on whether the file will be read or written. The default is the mode of fileobj if discernible; otherwise, the default is 'rb'. Be aware that only the 'rb', 'ab', and 'wb' values should be used for cross-platform portability. The compresslevel argument is an integer from 1 to 9 controlling the level of compression; 1 is fastest and produces the least compression, and 9 is slowest and produces the most compression. The default is 9. """ gzip.GzipFile.__init__(self, filename, mode, compresslevel, fileobj) self.lock = _Semaphore() self.__size = None def __del__(self): """Explicit close at deletion """ if hasattr(self, "closed") and not self.closed: self.close() def __repr__(self): return "fabio." + gzip.GzipFile.__repr__(self) def measure_size(self): if self.mode == gzip.WRITE: return self.size if self.__size is None: with self.lock: if self.__size is None: if "offset" in dir(self): pos = self.offset elif "tell" in dir(self): pos = self.tell() end_pos = len(gzip.GzipFile.read(self)) + pos self.seek(pos) logger.debug("Measuring size of %s: %s @ %s == %s" % (self.name, end_pos, pos, pos)) self.__size = end_pos return self.__size def __enter__(self): return self def __exit__(self, *args): """ Close the file. 
""" gzip.GzipFile.close(self) if bz2 is None: BZ2File = UnknownCompressedFile else: class BZ2File(bz2.BZ2File): "Wrapper with lock" def __init__(self, name, mode='r', buffering=0, compresslevel=9): """ BZ2File(name [, mode='r', compresslevel=9]) -> file object Open a bz2 file. The mode can be 'r' or 'w', for reading (default) or writing. When opened for writing, the file will be created if it doesn't exist, and truncated otherwise. If compresslevel is given, must be a number between 1 and 9. Add a 'U' to mode to open the file for input with universal newline support. Any line ending in the input file will be seen as a '\n' in Python. Also, a file so opened gains the attribute 'newlines'; the value for this attribute is one of None (no newline read yet), '\r', '\n', '\r\n' or a tuple containing all the newline types seen. Universal newlines are available only when reading. """ bz2.BZ2File.__init__(self, name, mode, buffering, compresslevel) self.lock = _Semaphore() self.__size = None def __del__(self): """Explicit close at deletion """ if hasattr(self, "closed") and not self.closed: self.close() def getSize(self): if self.__size is None: logger.debug("Measuring size of %s" % self.name) with self.lock: pos = self.tell() _ = self.read() self.__size = self.tell() self.seek(pos) return self.__size def setSize(self, value): self.__size = value size = property(getSize, setSize) def __enter__(self): return self def __exit__(self, *args): """ Close the file at exit """ bz2.BZ2File.close(self) class NotGoodReader(RuntimeError): """The reader used is probably not the good one """ pass class DebugSemaphore(_Semaphore): """ threading.Semaphore like class with helper for fighting dead-locks """ write_lock = _Semaphore() blocked = [] def __init__(self, *arg, **kwarg): _Semaphore.__init__(self, *arg, **kwarg) def acquire(self, *arg, **kwarg): if self._Semaphore__value == 0: with self.write_lock: self.blocked.append(id(self)) sys.stderr.write(os.linesep.join(["Blocking sem %s" % 
id(self)] + traceback.format_stack()[:-1] + [""])) return _Semaphore.acquire(self, *arg, **kwarg) def release(self, *arg, **kwarg): with self.write_lock: uid = id(self) if uid in self.blocked: self.blocked.remove(uid) sys.stderr.write("Released sem %s %s" % (uid, os.linesep)) _Semaphore.release(self, *arg, **kwarg) def __enter__(self): self.acquire() return self def __exit__(self, *arg, **kwarg): self.release() def exists(path): """Test whether a path exists. Replaces os.path.exists and handles in addition "::" based URI as defined in http://odo.pydata.org/en/latest/uri.html#separating-parts-with :param path: string :return: boolean """ return os.path.exists(path.split("::")[0]) class OrderedDict(_OrderedDict): """Ordered dictionary with pretty print""" def __repr__(self): try: res = json.dumps(self, indent=2) except Exception as err: logger.warning("Header is not JSON-serializable: %s", err) tmp = _OrderedDict() for key, value in self.items(): tmp[str(key)] = str(value) res = json.dumps(tmp, indent=2) return res fabio-0.6.0/fabio/compression.py0000644001611600070440000003634013227357030017713 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. """Compression and decompression algorithm for various formats Authors: Jérôme Kieffer, ESRF email:jerome.kieffer@esrf.fr """ # get ready for python3 from __future__ import absolute_import, print_function, with_statement, division __author__ = "Jérôme Kieffer" __contact__ = "jerome.kieffer@esrf.eu" __license__ = "MIT" __date__ = "06/10/2017" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" import sys import base64 import hashlib import logging import subprocess import numpy try: from .third_party import six except ImportError: import six if six.PY2: bytes = str logger = logging.getLogger(__name__) try: from .third_party import gzip except ImportError: logger.error("Unable to import gzip module: disabling gzip compression") gzip = None try: import bz2 except ImportError: logger.error("Unable to import bz2 module: disabling bz2 compression") bz2 = None try: import zlib except ImportError: logger.error("Unable to import zlib module: disabling zlib compression") zlib = None if sys.platform != "win32": WindowsError = OSError def is_incomplete_gz_block_exception(exception): """True if the exception looks to be generated when a GZ block is incomplete. 
:rtype: bool """ if six.PY2: if isinstance(exception, IOError): return "CRC check failed" in exception.args[0] elif six.PY3: version = sys.version_info[0:2] if version == (3, 3): import struct return isinstance(exception, struct.error) return isinstance(exception, EOFError) return False def md5sum(blob): """ returns the md5sum of an object... """ return base64.b64encode(hashlib.md5(blob).digest()) def endianness(): """ Return the native endianness of the system """ if numpy.little_endian: return "LITTLE_ENDIAN" else: return "BIG_ENDIAN" class ExternalCompressors(object): """Class to handle lazy discovery of external compression programs""" COMMANDS = {".bz2": ["bzip2" "-dcf"], ".gz": ["gzip", "-dcf"] } def __init__(self): """Empty constructor""" self.compressors = {} def __getitem__(self, key): """Implement the dict-like behavior""" if key not in self.compressors: if key in self.COMMANDS: commandline = self.COMMANDS[key] testline = [commandline[0], "-h"] try: lines = subprocess.check_output(testline, stderr=subprocess.STDOUT, universal_newlines=True) if "usage" in lines.lower(): self.compressors[key] = commandline else: self.compressors[key] = None except (subprocess.CalledProcessError, WindowsError) as err: logger.debug("No %s utility found: %s", commandline[0], err) self.compressors[key] = None else: self.compressors[key] = None return self.compressors[key] COMPRESSORS = ExternalCompressors() def decGzip(stream): """Decompress a chunk of data using the gzip algorithm from system or from Python :param stream: compressed data :return: uncompressed stream """ def _python_gzip(stream): """Inefficient implementation based on loops in Python""" for i in range(1, 513): try: fileobj = six.BytesIO(stream[:-i]) uncompessed = gzip.GzipFile(fileobj=fileobj).read() except IOError: logger.debug("trying with %s bytes less, doesn't work" % i) else: return uncompessed if gzip is None: raise ImportError("gzip module is not available") fileobj = six.BytesIO(stream) try: uncompessed 
= gzip.GzipFile(fileobj=fileobj).read() except IOError: logger.warning("Encounter the python-gzip bug with trailing garbage, trying subprocess gzip") cmd = COMPRESSORS[".gz"] if cmd: try: sub = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE) uncompessed, err = sub.communicate(input=stream) logger.debug("Gzip subprocess ended with %s err= %s; I got %s bytes back" % (sub.wait(), err, len(uncompessed))) except OSError as error: logger.warning("Unable to use the subprocess gzip (%s). Is gzip available? " % error) uncompessed = _python_gzip(stream) else: uncompessed = _python_gzip(stream) if uncompessed is None: logger.error("I am totally unable to read this gzipped compressed data block, giving up") return uncompessed def decBzip2(stream): """ Decompress a chunk of data using the bzip2 algorithm from Python """ if bz2 is None: raise ImportError("bz2 module is not available") return bz2.decompress(stream) def decZlib(stream): """ Decompress a chunk of data using the zlib algorithm from Python """ if zlib is None: raise ImportError("zlib module is not available") return zlib.decompress(stream) def decByteOffset_numpy(stream, size=None, dtype="int64"): """ Analyze a stream of char with any length of exception: 2, 4, or 8 bytes integers :param stream: string representing the compressed data :param size: the size of the output array (of longInts) :return: 1D-ndarray """ logger.debug("CBF decompression using Numpy") listnpa = [] key16 = b"\x80" key32 = b"\x00\x80" key64 = b"\x00\x00\x00\x80" shift = 1 while True: idx = stream.find(key16) if idx == -1: listnpa.append(numpy.fromstring(stream, dtype="int8")) break listnpa.append(numpy.fromstring(stream[:idx], dtype="int8")) if stream[idx + 1:idx + 3] == key32: if stream[idx + 3:idx + 7] == key64: # 64 bits int res = numpy.fromstring(stream[idx + 7:idx + 15], dtype="int64") listnpa.append(res) shift = 15 else: # 32 bits int res = numpy.fromstring(stream[idx + 3:idx + 7], 
dtype="int32") listnpa.append(res) shift = 7 else: # int16 res = numpy.fromstring(stream[idx + 1:idx + 3], dtype="int16") listnpa.append(res) shift = 3 stream = stream[idx + shift:] if not numpy.little_endian: for res in listnpa: if res.dtype != numpy.int8: res.byteswap(True) return numpy.ascontiguousarray(numpy.hstack(listnpa), dtype).cumsum() def decByteOffset_cython(stream, size=None, dtype="int64"): """ Analyze a stream of char with any length of exception: 2, 4, or 8 bytes integers :param stream: string representing the compressed data :param size: the size of the output array (of longInts) :return: 1D-ndarray """ logger.debug("CBF decompression using cython") try: from .ext import byte_offset except ImportError as error: logger.error("Failed to import byte_offset cython module, falling back on numpy method: %s", error) return decByteOffset_numpy(stream, size, dtype=dtype) else: if dtype == "int32": return byte_offset.dec_cbf32(stream, size) else: return byte_offset.dec_cbf(stream, size) decByteOffset = decByteOffset_cython def compByteOffset_numpy(data): """ Compress a dataset into a string using the byte_offet algorithm :param data: ndarray :return: string/bytes with compressed data test = numpy.array([0,1,2,127,0,1,2,128,0,1,2,32767,0,1,2,32768,0,1,2,2147483647,0,1,2,2147483648,0,1,2,128,129,130,32767,32768,128,129,130,32768,2147483647,2147483648]) """ flat = numpy.ascontiguousarray(data.ravel(), numpy.int64) delta = numpy.zeros_like(flat) delta[0] = flat[0] delta[1:] = flat[1:] - flat[:-1] mask = abs(delta) > 127 exceptions = numpy.nonzero(mask)[0] if numpy.little_endian: byteswap = False else: byteswap = True start = 0 binary_blob = b"" for stop in exceptions: if stop - start > 0: binary_blob += delta[start:stop].astype("int8").tostring() exc = delta[stop] absexc = abs(exc) if absexc > 2147483647: # 2**31-1 binary_blob += b"\x80\x00\x80\x00\x00\x00\x80" if byteswap: binary_blob += delta[stop:stop + 1].byteswap().tostring() else: binary_blob += 
delta[stop:stop + 1].tostring() elif absexc > 32767: # 2**15-1 binary_blob += b"\x80\x00\x80" if byteswap: binary_blob += delta[stop:stop + 1].astype(numpy.int32).byteswap().tostring() else: binary_blob += delta[stop:stop + 1].astype(numpy.int32).tostring() else: # >127 binary_blob += b"\x80" if byteswap: binary_blob += delta[stop:stop + 1].astype(numpy.int16).byteswap().tostring() else: binary_blob += delta[stop:stop + 1].astype(numpy.int16).tostring() start = stop + 1 if start < delta.size: binary_blob += delta[start:].astype(numpy.int8).tostring() return binary_blob def compByteOffset_cython(data): """ Compress a dataset into a string using the byte_offet algorithm :param data: ndarray :return: string/bytes with compressed data test = numpy.array([0,1,2,127,0,1,2,128,0,1,2,32767,0,1,2,32768,0,1,2,2147483647,0,1,2,2147483648,0,1,2,128,129,130,32767,32768,128,129,130,32768,2147483647,2147483648]) """ logger.debug("CBF compression using cython") try: from .ext import byte_offset except ImportError as error: logger.error("Failed to import byte_offset cython module, falling back on numpy method: %s", error) return compByteOffset_numpy(data) else: if "int32" in str(data.dtype): return byte_offset.comp_cbf32(data).tostring() else: return byte_offset.comp_cbf(data).tostring() compByteOffset = compByteOffset_cython def decTY1(raw_8, raw_16=None, raw_32=None): """ Modified byte offset decompressor used in Oxford Diffraction images Note: Always expect little endian data on the disk :param raw_8: strings containing raw data with integer 8 bits :param raw_16: strings containing raw data with integer 16 bits :param raw_32: strings containing raw data with integer 32 bits :return: numpy.ndarray """ data = numpy.fromstring(raw_8, dtype="uint8").astype(int) data -= 127 if raw_32 is not None: int32 = numpy.fromstring(raw_32, dtype="int32") if not numpy.little_endian: int32.byteswap(True) exception32 = numpy.nonzero(data == 128) if raw_16 is not None: int16 = 
numpy.fromstring(raw_16, dtype="int16") if not numpy.little_endian: int16.byteswap(True) exception16 = numpy.nonzero(data == 127) data[exception16] = int16 if raw_32: data[exception32] = int32 summed = data.cumsum() smax = summed.max() if (smax > (2 ** 31 - 1)): bytecode = "int64" elif (smax > (2 ** 15 - 1)): bytecode = "int32" elif (smax > (2 ** 7 - 1)): bytecode = "int16" else: bytecode = "int8" return summed.astype(bytecode) decKM4CCD = decTY1 def compTY1(data): """ Modified byte offset compressor used in Oxford Diffraction images :param data: numpy.ndarray with the input data (integers!) :return: 3-tuple of strings: raw_8,raw_16,raw_32 containing raw data with integer of the given size """ fdata = data.ravel() diff = numpy.zeros_like(fdata) diff[0] = fdata[0] diff[1:] = fdata[1:] - fdata[:-1] adiff = abs(diff) exception32 = (adiff > (1 << 15) - 1) exception16 = (adiff >= (1 << 7) - 1) ^ exception32 we16 = numpy.where(exception16) we32 = numpy.where(exception32) data_16 = diff[we16].astype(numpy.int16) data_32 = diff[we32].astype(numpy.int32) if not numpy.little_endian: data_16.byteswap(True) data_32.byteswap(True) diff[we16] = 127 diff[we32] = 128 diff += 127 data_8 = diff.astype(numpy.uint8) return data_8.tostring(), data_16.tostring(), data_32.tostring() def decPCK(stream, dim1=None, dim2=None, overflowPix=None, version=None, normal_start=None, swap_needed=None): """ Modified CCP4 pck decompressor used in MAR345 images :param raw: input string (bytes in python3) :param dim1,dim2: optional parameters size :param overflowPix: optional parameters: number of overflowed pixels :param version: PCK version 1 or 2 :param normal_start: position of the normal value section (can be auto-guessed) :param swap_needed: set to True when reading data from a foreign endianness (little on big or big on little) :return: ndarray of 2D with the right size """ try: from .ext.mar345_IO import uncompress_pck except ImportError as error: raise RuntimeError("Unable to import mar345_IO 
to read compressed dataset: %s" % error) if "seek" in dir(stream): stream.seek(0) raw = stream.read() else: raw = bytes(stream) return uncompress_pck(raw, dim1, dim2, overflowPix, version, normal_start, swap_needed) def compPCK(data): """ Modified CCP4 pck compressor used in MAR345 images :param data: numpy.ndarray (square array) :return: compressed stream """ try: from .ext.mar345_IO import compress_pck except ImportError as error: raise RuntimeError("Unable to import mar345_IO to write compressed dataset: %s" % error) return compress_pck(data) fabio-0.6.0/fabio/marccdimage.py0000644001611600070440000003555113227357030017611 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE """ Authors: ........ * Henning O. 
Sorensen & Erik Knudsen: Center for Fundamental Research: Metal Structures in Four Dimensions; Risoe National Laboratory; Frederiksborgvej 399; DK-4000 Roskilde; email:erik.knudsen@risoe.dk * Jon Wright: European Synchrotron Radiation Facility; Grenoble (France) marccdimage can read MarCCD and MarMosaic images including header info. JPW : Use a parser in case of typos (sorry?) """ # Get ready for python3: from __future__ import with_statement, print_function, absolute_import # Base this on the tifimage (as marccd seems to be tiff with a # special header import logging import struct from .tifimage import TifImage logger = logging.getLogger(__name__) # Now for the c definition (found on mar webpage) # The following string is therefore copyrighted by Mar I guess CDEFINITION = """ typedef struct frame_header_type { /* File/header format parameters (256 bytes) */ UINT32 header_type; /* flag for header type (can be used as magic number) */ char header_name[16]; /* header name (MMX) */ UINT32 header_major_version; /* header_major_version (n.) 
*/ UINT32 header_minor_version; /* header_minor_version (.n) */ UINT32 header_byte_order;/* BIG_ENDIAN (Motorola,MIPS); LITTLE_ENDIAN (DEC, Intel) */ UINT32 data_byte_order; /* BIG_ENDIAN (Motorola,MIPS); LITTLE_ENDIAN (DEC, Intel) */ UINT32 header_size; /* in bytes */ UINT32 frame_type; /* flag for frame type */ UINT32 magic_number; /* to be used as a flag - usually to indicate new file */ UINT32 compression_type; /* type of image compression */ UINT32 compression1; /* compression parameter 1 */ UINT32 compression2; /* compression parameter 2 */ UINT32 compression3; /* compression parameter 3 */ UINT32 compression4; /* compression parameter 4 */ UINT32 compression5; /* compression parameter 4 */ UINT32 compression6; /* compression parameter 4 */ UINT32 nheaders; /* total number of headers */ UINT32 nfast; /* number of pixels in one line */ UINT32 nslow; /* number of lines in image */ UINT32 depth; /* number of bytes per pixel */ UINT32 record_length; /* number of pixels between succesive rows */ UINT32 signif_bits; /* true depth of data, in bits */ UINT32 data_type; /* (signed,unsigned,float...) */ UINT32 saturated_value; /* value marks pixel as saturated */ UINT32 sequence; /* TRUE or FALSE */ UINT32 nimages; /* total number of images - size of each is nfast*(nslow/nimages) */ UINT32 origin; /* corner of origin */ UINT32 orientation; /* direction of fast axis */ UINT32 view_direction; /* direction to view frame */ UINT32 overflow_location;/* FOLLOWING_HEADER, FOLLOWING_DATA */ UINT32 over_8_bits; /* # of pixels with counts 255 */ UINT32 over_16_bits; /* # of pixels with count 65535 */ UINT32 multiplexed; /* multiplex flag */ UINT32 nfastimages; /* # of images in fast direction */ UINT32 nslowimages; /* # of images in slow direction */ UINT32 background_applied;/* flags correction has been applied hold magic number ? */ UINT32 bias_applied; /* flags correction has been applied hold magic number ? 
*/ UINT32 flatfield_applied;/* flags correction has been applied - hold magic number ? */ UINT32 distortion_applied;/*flags correction has been applied - hold magic number ? */ UINT32 original_header_type; /* Header/frame type from file that frame is read from */ UINT32 file_saved; /* Flag that file has been saved, should be zeroed if modified */ char reserve1[(64-40)*sizeof(INT32)-16]; /* Data statistics (128) */ UINT32 total_counts[2]; /* 64 bit integer range = 1.85E19*/ UINT32 special_counts1[2]; UINT32 special_counts2[2]; UINT32 min; UINT32 max; UINT32 mean; UINT32 rms; UINT32 p10; UINT32 p90; UINT32 stats_uptodate; UINT32 pixel_noise[MAXIMAGES]; /*1000*base noise value (ADUs) */ char reserve2[(32-13-MAXIMAGES)*sizeof(INT32)]; /* More statistics (256) */ UINT16 percentile[128]; /* Goniostat parameters (128 bytes) */ INT32 xtal_to_detector; /* 1000*distance in millimeters */ INT32 beam_x; /* 1000*x beam position (pixels) */ INT32 beam_y; /* 1000*y beam position (pixels) */ INT32 integration_time; /* integration time in milliseconds */ INT32 exposure_time; /* exposure time in milliseconds */ INT32 readout_time; /* readout time in milliseconds */ INT32 nreads; /* number of readouts to get this image */ INT32 start_twotheta; /* 1000*two_theta angle */ INT32 start_omega; /* 1000*omega angle */ INT32 start_chi; /* 1000*chi angle */ INT32 start_kappa; /* 1000*kappa angle */ INT32 start_phi; /* 1000*phi angle */ INT32 start_delta; /* 1000*delta angle */ INT32 start_gamma; /* 1000*gamma angle */ INT32 start_xtal_to_detector; /* 1000*distance in mm (dist in um)*/ INT32 end_twotheta; /* 1000*two_theta angle */ INT32 end_omega; /* 1000*omega angle */ INT32 end_chi; /* 1000*chi angle */ INT32 end_kappa; /* 1000*kappa angle */ INT32 end_phi; /* 1000*phi angle */ INT32 end_delta; /* 1000*delta angle */ INT32 end_gamma; /* 1000*gamma angle */ INT32 end_xtal_to_detector; /* 1000*distance in mm (dist in um)*/ INT32 rotation_axis; /* active rotation axis */ INT32 rotation_range; 
/* 1000*rotation angle */ INT32 detector_rotx; /* 1000*rotation of detector around X */ INT32 detector_roty; /* 1000*rotation of detector around Y */ INT32 detector_rotz; /* 1000*rotation of detector around Z */ char reserve3[(32-28)*sizeof(INT32)]; /* Detector parameters (128 bytes) */ INT32 detector_type; /* detector type */ INT32 pixelsize_x; /* pixel size (nanometers) */ INT32 pixelsize_y; /* pixel size (nanometers) */ INT32 mean_bias; /* 1000*mean bias value */ INT32 photons_per_100adu; /* photons / 100 ADUs */ INT32 measured_bias[MAXIMAGES];/* 1000*mean bias value for each image*/ INT32 measured_temperature[MAXIMAGES]; /* Temperature of each detector in milliKelvins */ INT32 measured_pressure[MAXIMAGES]; /* Pressure of each chamber in microTorr */ /* Retired reserve4 when MAXIMAGES set to 9 from 16 and two fields removed, and temp and pressure added char reserve4[(32-(5+3*MAXIMAGES))*sizeof(INT32)] */ /* X-ray source and optics parameters (128 bytes) */ /* X-ray source parameters (8*4 bytes) */ INT32 source_type; /* (code) - target, synch. etc */ INT32 source_dx; /* Optics param. - (size microns) */ INT32 source_dy; /* Optics param. - (size microns) */ INT32 source_wavelength; /* wavelength (femtoMeters) */ INT32 source_power; /* (Watts) */ INT32 source_voltage; /* (Volts) */ INT32 source_current; /* (microAmps) */ INT32 source_bias; /* (Volts) */ INT32 source_polarization_x; /* () */ INT32 source_polarization_y; /* () */ char reserve_source[4*sizeof(INT32)]; /* X-ray optics_parameters (8*4 bytes) */ INT32 optics_type; /* Optics type (code)*/ INT32 optics_dx; /* Optics param. - (size microns) */ INT32 optics_dy; /* Optics param. - (size microns) */ INT32 optics_wavelength; /* Optics param. - (size microns) */ INT32 optics_dispersion; /* Optics param. - (*10E6) */ INT32 optics_crossfire_x; /* Optics param. - (microRadians) */ INT32 optics_crossfire_y; /* Optics param. - (microRadians) */ INT32 optics_angle; /* Optics param. - (monoch. 
2theta - microradians) */ INT32 optics_polarization_x; /* () */ INT32 optics_polarization_y; /* () */ char reserve_optics[4*sizeof(INT32)]; char reserve5[((32-28)*sizeof(INT32))]; /* File parameters (1024 bytes) */ char filetitle[128]; /* Title */ char filepath[128]; /* path name for data file */ char filename[64]; /* name of data file */ char acquire_timestamp[32]; /* date and time of acquisition */ char header_timestamp[32]; /* date and time of header update */ char save_timestamp[32]; /* date and time file saved */ char file_comments[512]; /* comments, use as desired */ char reserve6[1024-(128+128+64+(3*32)+512)]; /* Dataset parameters (512 bytes) */ char dataset_comments[512]; /* comments, used as desired */ /* pad out to 3072 bytes */ char pad[3072-(256+128+256+(3*128)+1024+512)]; } frame_header; """ # Convert mar c header file types to python struct module types C_TO_STRUCT = { "INT32": "i", "UINT32": "I", "char": "c", "UINT16": "H"} # Sizes (bytes) of mar c header objects C_SIZES = { "INT32": 4, "UINT32": 4, "char": 1, "UINT16": 2} # This was worked out by trial and error from a trial image I think MAXIMAGES = 9 def make_format(c_def_string): """ Reads the header definition in c and makes the format string to pass to struct.unpack """ lines = c_def_string.split("\n") fmt = "" names = [] expected = 0 for line in lines: if line.find(";") == -1: continue decl = line.split(";")[0].lstrip().rstrip() try: [typ, name] = decl.split() except ValueError: logger.debug("skipping: %s", line) continue if name.find("[") > -1: # repeated ... 
times try: num = name.split("[")[1].split("]")[0] num = num.replace("MAXIMAGES", str(MAXIMAGES)) num = num.replace("sizeof(INT32)", "4") times = eval(num) except Exception as error: logger.error("%s Please decode %s", error, decl) raise error else: times = 1 try: fmt += C_TO_STRUCT[typ] * times names += [name] * times expected += C_SIZES[typ] * times except KeyError: continue return names, fmt # Make these be compiled on loading module HEADER_NAMES, HEADER_FORMAT = make_format(CDEFINITION) def interpret_header(header, fmt, names): """ given a format and header interpret it """ values = struct.unpack(fmt, header) hdr = {} i = 0 for name in names: if name in hdr: if isinstance(values[i], str): hdr[name] = hdr[name] + values[i] else: try: hdr[name].append(values[i]) except AttributeError: hdr[name] = [hdr[name], values[i]] else: hdr[name] = values[i] i = i + 1 return hdr class MarccdImage(TifImage): """ Read in data in mar ccd format, also MarMosaic images, including header info """ DESCRIPTION = "File format from MarCCD and MarMosaic images" DEFAULT_EXTENSIONS = ["mccd"] def _readheader(self, infile): """ Parser based approach Gets all entries """ infile.seek(1024) hstr = infile.read(3072) self.header = interpret_header(hstr, HEADER_FORMAT, HEADER_NAMES) def _read(self, fname): """ inherited from tifimage ... 
a marccd image *is a* tif image just with a header """ return TifImage.read(self, fname) marccdimage = MarccdImage fabio-0.6.0/fabio/adscimage.py0000644001611600070440000001153213227357030017263 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # """ Authors: Henning O. Sorensen & Erik Knudsen Center for Fundamental Research: Metal Structures in Four Dimensions Risoe National Laboratory Frederiksborgvej 399 DK-4000 Roskilde email:erik.knudsen@risoe.dk + mods for fabio by JPW """ # Get ready for python3: from __future__ import with_statement, print_function import numpy import logging from .fabioimage import FabioImage from .fabioutils import to_str logger = logging.getLogger(__name__) class AdscImage(FabioImage): """ Read an image in ADSC format (quite similar to edf?) """ DESCRIPTION = "ADSC format (from Area Detector Systems Corporation)" DEFAULT_EXTENSIONS = ["img"] def __init__(self, *args, **kwargs): FabioImage.__init__(self, *args, **kwargs) def read(self, fname, frame=None): """ read in the file """ with self._open(fname, "rb") as infile: try: self._readheader(infile) except: raise Exception("Error processing adsc header") # banned by bzip/gzip??? 
try: infile.seek(int(self.header['HEADER_BYTES']), 0) except TypeError: # Gzipped does not allow a seek and read header is not # promising to stop in the right place infile.close() infile = self._open(fname, "rb") infile.read(int(self.header['HEADER_BYTES'])) binary = infile.read() # infile.close() # now read the data into the array self.dim1 = int(self.header['SIZE1']) self.dim2 = int(self.header['SIZE2']) data = numpy.fromstring(binary, numpy.uint16) if self.swap_needed(): data.byteswap(True) try: data.shape = (self.dim2, self.dim1) except ValueError: raise IOError('Size spec in ADSC-header does not match ' + 'size of image data field %sx%s != %s' % (self.dim1, self.dim2, data.size)) self.data = data self.bytecode = numpy.uint16 self.resetvals() return self def _readheader(self, infile): """ read an adsc header """ line = infile.readline() bytesread = len(line) while b'}' not in line: if b'=' in line: (key, val) = to_str(line).split('=') self.header[key.strip()] = val.strip(' ;\n\r') line = infile.readline() bytesread = bytesread + len(line) def write(self, fname): """ Write adsc format """ out = b'{\n' for key in self.header: out += b"%s = %s;\n" % (key, self.header[key]) if "HEADER_BYTES" in self.header: pad = int(self.header["HEADER_BYTES"]) - len(out) - 2 else: # hsize = ((len(out) + 23) // 512 + 1) * 512 hsize = (len(out) + 533) & ~(512 - 1) out += b"HEADER_BYTES=%d;\n" % (hsize) pad = hsize - len(out) - 2 out += pad * b' ' + b"}\n" assert len(out) % 512 == 0, "Header is not multiple of 512" data = self.data.astype(numpy.uint16) if self.swap_needed(): data.byteswap(True) with open(fname, "wb") as outf: outf.write(out) outf.write(data.tostring()) # outf.close() def swap_needed(self): if "BYTE_ORDER" not in self.header: logger.warning("No byte order specified, assuming little_endian") BYTE_ORDER = "little_endian" else: BYTE_ORDER = self.header["BYTE_ORDER"] if "little" in BYTE_ORDER and numpy.little_endian: return False elif "big" in BYTE_ORDER and not 
numpy.little_endian: return False elif "little" in BYTE_ORDER and not numpy.little_endian: return True elif "big" in BYTE_ORDER and numpy.little_endian: return True adscimage = AdscImage fabio-0.6.0/fabio/openimage.py0000644001611600070440000001762313227357030017321 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE """ Authors: Henning O. 
Sorensen & Erik Knudsen Center for Fundamental Research: Metal Structures in Four Dimensions Risoe National Laboratory Frederiksborgvej 399 DK-4000 Roskilde email:henning.sorensen@risoe.dk mods for fabio by JPW modification for HDF5 by Jérôme Kieffer """ # Get ready for python3: from __future__ import with_statement, print_function, absolute_import import sys import logging logger = logging.getLogger(__name__) from .fabioutils import FilenameObject, six, BytesIO from .fabioimage import FabioImage # Make sure to load all formats from . import fabioformats # noqa MAGIC_NUMBERS = [ # "\42\5a" : 'bzipped' # "\1f\8b" : 'gzipped' (b"FORMAT : 86", 'bruker'), (b"\x4d\x4d\x00\x2a", 'tif'), # The marCCD and Pilatus formats are both standard tif with a header # hopefully these byte patterns are unique for the formats # If not the image will be read, but the is missing (b"\x49\x49\x2a\x00\x08\x00", 'marccd/tif'), (b"\x49\x49\x2a\x00\x82\x00", 'pilatus'), (b"\x49\x49\x2a\x00", 'tif'), # ADSC must come before edf (b"{\nHEA", 'adsc'), (b"{", 'edf'), (b"\r{", 'edf'), (b"\n{", 'edf'), (b"ADEPT", 'GE'), (b"OD", 'OXD'), (b"IM", 'HiPiC'), (b'\x2d\x04', 'mar345'), (b'\xd2\x04', 'mar345'), (b'\x04\x2d', 'mar345'), # some machines may need byteswapping (b'\x04\xd2', 'mar345'), # hint : MASK in 32 bit (b'M\x00\x00\x00A\x00\x00\x00S\x00\x00\x00K\x00\x00\x00', 'fit2dmask'), (b'\x00\x00\x00\x03', 'dm3'), (b"No", "kcd"), (b"<", "xsd"), (b"\n\xb8\x03\x00", 'pixi'), (b"\x89\x48\x44\x46\x0d\x0a\x1a\x0a", "eiger/hdf5"), (b"R-AXIS", 'raxis'), (b"\x93NUMPY", 'numpy'), (b"\\$FFF_START", 'fit2d'), # Raw JPEG (b"\xFF\xD8\xFF\xDB", "jpeg"), # JFIF format (b"\xFF\xD8\xFF\xE0", "jpeg"), # Exif format (b"\xFF\xD8\xFF\xE1", "jpeg"), # JPEG 2000 (from RFC 3745) (b"\x00\x00\x00\x0C\x6A\x50\x20\x20\x0D\x0A\x87\x0A", "jpeg2k"), ] def do_magic(byts, filename): """ Try to interpret the bytes starting the file as a magic number """ for magic, format_type in MAGIC_NUMBERS: if byts.startswith(magic): if "/" in 
format_type: if format_type == "eiger/hdf5": if "::" in filename: return "hdf5" else: return "eiger" elif format_type == "marccd/tif": if "mccd" in filename.split("."): return "marccd" else: return "tif" return format_type raise Exception("Could not interpret magic string") def openimage(filename, frame=None): """ Try to open an image """ if isinstance(filename, FilenameObject): try: logger.debug("Attempting to open %s" % (filename.tostring())) obj = _openimage(filename.tostring()) logger.debug("Attempting to read frame %s from %s with reader %s" % (frame, filename.tostring(), obj.classname)) obj = obj.read(filename.tostring(), frame) except Exception as ex: # multiframe file # logger.debug( "DEBUG: multiframe file, start # %d"%( # filename.num) logger.debug("Exception %s, trying name %s" % (ex, filename.stem)) obj = _openimage(filename.stem) logger.debug("Reading frame %s from %s" % (filename.num, filename.stem)) obj.read(filename.stem, frame=filename.num) else: logger.debug("Attempting to open %s" % (filename)) obj = _openimage(filename) logger.debug("Attempting to read frame %s from %s with reader %s" % (frame, filename, obj.classname)) obj = obj.read(obj.filename, frame) return obj def openheader(filename): """ return only the header""" obj = _openimage(filename) obj.readheader(obj.filename) return obj def _openimage(filename): """ determine which format for a filename and return appropriate class which can be used for opening the image :param filename: can be an url like: hdf5:///example.h5?entry/instrument/detector/data/data#slice=[:,:,5] """ try: url = six.moves.urllib_parse.urlparse(filename) except AttributeError as err: # Assume we have as input a BytesIO object attrs = dir(filename) if "seek" in attrs and "read" in attrs: if not isinstance(filename, BytesIO): filename.seek(0) actual_filename = BytesIO(filename.read()) else: actual_filename = filename url = six.moves.urllib_parse.urlparse("") else: # related to https://github.com/silx-kit/fabio/issues/34 
if (len(url.scheme) == 1 and (sys.platform == "win32")) or url.path.startswith(":"): # this is likely a C: from windows or filename::path filename = url.scheme + ":" + url.path else: filename = url.path actual_filename = filename.split("::")[0] try: imo = FabioImage() with imo._open(actual_filename) as f: magic_bytes = f.read(18) except IOError as error: logger.debug("%s: File probably does not exist", error) raise error else: imo = None filetype = None try: filetype = do_magic(magic_bytes, filename) except Exception: logger.debug("Backtrace", exc_info=True) try: file_obj = FilenameObject(filename=filename) if file_obj is None: raise Exception("Unable to deconstruct filename") if (file_obj.format is not None) and\ len(file_obj.format) != 1 and \ isinstance(file_obj.format, list): # one of OXD/ADSC - should have got in previous raise Exception("openimage failed on magic bytes & name guess") filetype = file_obj.format except Exception as error: logger.debug("Backtrace", exc_info=True) raise IOError("Fabio could not identify " + filename) if filetype is None: raise IOError("Fabio could not identify " + filename) klass_name = "".join(filetype) + 'image' try: obj = FabioImage.factory(klass_name) except (RuntimeError, Exception): logger.debug("Backtrace", exc_info=True) raise IOError("Filename %s can't be read as format %s" % (filename, klass_name)) if url.scheme in ["nxs", "hdf5"] and filetype == "hdf5": obj.set_url(url) obj.filename = filename # skip the read for read header return obj fabio-0.6.0/fabio/ext/0000755001611600070440000000000013227375744015606 5ustar kiefferscisoft00000000000000fabio-0.6.0/fabio/ext/cf_io.pyx0000644001611600070440000000767013227357030017425 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # Copyright (C) 2015 European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of 
# charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

"""New Cython version of cf_iomodule.c for preparing the migration to Python3"""

__authors__ = ["Jerome Kieffer"]
__contact__ = "jerome.kieffer@esrf.eu"
__license__ = "MIT"
__copyright__ = "2013, European Synchrotron Radiation Facility, Grenoble, France"

import cython
cimport numpy
import numpy
import os
import tempfile
import logging
logger = logging.getLogger(__name__)
from libc.string cimport memcpy
# fclose added: the FILE* opened below must be released after parsing
from libc.stdio cimport fopen, fclose, FILE

CF_H = 1
CF_INIT_ROWS = 8192
CF_INIT_COLS = 32
CF_HEADER_ITEM = 128
CF_GZ_COMP = 1
CF_BIN = 2

cdef extern from "columnfile.h":
    struct cf_data:
        int ncols, nrows
        unsigned int nralloc
        double **data
        char **clabels
    void * cf_read_ascii(void *fp, void *dest, unsigned int FLAGS)nogil
    void * cf_read_bin(void *fp, void *dest, unsigned int FLAGS)nogil
    int cf_write(char *fname, void *cf_handle, unsigned int FLAGS)nogil
    int cf_write_bin(void *fp, void *cf_handle)nogil
    int cf_write_ascii(void *fp, void *cf_handle,unsigned int FLAGS)nogil
    void cf_free( cf_data *cf_handle)nogil


def read(py_file, mode="a"):
    """Call the c-columnfile reading interface.

    The mode keyword argument is either:
    "a" for ascii (the default)
    "b" for binary

    :param py_file: Python file-like object providing read()
    :param mode: "a"/"b", optionally with "z" for gzip-compressed content
    :return: (2D numpy.float64 array, list of column labels),
             or (None, None) when the C parser fails
    """
    cdef:
        cf_data *cf__
        unsigned int flags = 0, fd
        int i
        FILE *file
    # The C reader needs a real FILE*: spool the Python file-like object to a
    # temporary file first. The context manager flushes and closes fd exactly
    # once (the former fdopen(...).write(...) followed by os.close(fd) closed
    # the descriptor twice).
    (fd, fname) = tempfile.mkstemp()
    with os.fdopen(fd, "wb") as tmp:
        tmp.write(py_file.read())
    # Encode explicitly: passing a str to a char* argument fails under
    # Python 3 (same convention as mar345_IO.compress_pck).
    file = fopen(fname.encode("ASCII"), "r")
    if file == NULL:
        os.unlink(fname)
        raise IOError("Unable to re-open temporary file %s" % fname)
    if "z" in mode:
        flags |= CF_GZ_COMP
    try:
        if "b" in mode:
            cf__ = cf_read_bin(file, NULL, flags)
        elif "a" in mode:
            cf__ = cf_read_ascii(file, NULL, flags)
        else:
            logger.error("Unrecognized mode for columnfile %s (assuming ascii)", mode)
            cf__ = cf_read_ascii(file, NULL, flags)
    finally:
        # Formerly neither the FILE* nor the temporary file was released
        fclose(file)
        os.unlink(fname)
    # check for failure to read
    if (cf__ == NULL):
        return None, None
    dims = (cf__.nrows, cf__.ncols)
    # since data may be non-contigous we can't simply create a numpy-array from
    # cf__->data, as Numpy's memory model prohibits it
    cdef numpy.ndarray[numpy.float64_t, ndim=2] py_data = numpy.empty(dims, dtype=numpy.float64)
    for i in range(cf__.nrows):
        memcpy(&py_data[i, 0], cf__.data[i], cf__.ncols * sizeof(double))
    clabels = []
    for i in range(cf__.ncols):
        # char* converts to bytes under Python 3: decode instead of str(),
        # which would produce "b'...'" labels.
        label = <bytes> cf__.clabels[i]
        clabels.append(label if isinstance(label, str) else label.decode("ASCII"))
    cf_free(cf__)
    return py_data, clabels

# coding: utf-8
#
#    Project: X-ray image reader
#             https://github.com/silx-kit/fabio
#
#    Copyright (C) 2015 European Synchrotron Radiation Facility, Grenoble, France
#
#    Principal author:       Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" New Cython version of mar345_IO for preparing the migration to Python3 Compressor & decompressor for "pack" algorithm by JPA, binding to CCP4 libraries those libraries are re-implemented in Cython. Known bugs: ----------- The precomp/postdec part need to be performed operation in int16 and exports uint16. Some calculation are overflowing, this is needed to reproduce the original implementation which is buggy """ __authors__ = ["Jerome Kieffer", "Gael Goret", "Thomas Vincent"] __contact__ = "jerome.kieffer@esrf.eu" __license__ = "MIT" __copyright__ = "2012-2016, European Synchrotron Radiation Facility, Grenoble, France" __date__ = "11/08/2017" import cython cimport numpy as cnp import numpy import os import tempfile import logging logger = logging.getLogger(__name__) ctypedef fused any_int_t: cnp.int8_t cnp.int16_t cnp.int32_t cnp.int64_t # Few constants: cdef: cnp.uint8_t *CCP4_PCK_BIT_COUNT = [0, 4, 5, 6, 7, 8, 16, 32] cnp.uint8_t *CCP4_BITSIZE = [0, 0, 0, 0, 1, 2, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7] int CCP4_PCK_BLOCK_HEADER_LENGTH = 6 cdef extern from "ccp4_pack.h": void* mar345_read_data_string(char *instream, int ocount, int dim1, int dim2) nogil void pack_wordimage_c(short int*, int , int , char*) nogil void* ccp4_unpack_string (void *, void *, size_t, size_t, size_t) nogil void* ccp4_unpack_v2_string(void *, void *, size_t, size_t, size_t) nogil cdef int PACK_SIZE_HIGH = 8 @cython.boundscheck(False) def compress_pck(image not None, bint use_CCP4=False): """ :param image: numpy array as input :param use_CCP4: use the former LGPL implementation provided by CCP4 :return: binary stream """ cdef: cnp.uint32_t size, dim0, dim1, i, j int fd, ret char* name cnp.int16_t[::1] data cnp.int32_t[::1] raw bytes output assert image.ndim == 2, "Input image shape is 2D" size = image.size dim0 = image.shape[0] dim1 = image.shape[1] data = numpy.ascontiguousarray(image.ravel(), dtype=numpy.int16) if use_CCP4: (fd, fname) = 
tempfile.mkstemp() fname = fname.encode("ASCII") name = fname with nogil: pack_wordimage_c( &data[0], dim1, dim0, name) with open(name, "rb") as f: f.seek(0) output = f.read() os.close(fd) os.unlink(fname) else: output = ("\nCCP4 packed image, X: %04d, Y: %04d\n" % (dim1, dim0)).encode("ASCII") raw = precomp(data, dim1) cont = pack_image(raw, False) output += cont.get().tostring() return output @cython.boundscheck(False) @cython.cdivision(True) def uncompress_pck(bytes raw not None, dim1=None, dim2=None, overflowPix=None, version=None, normal_start=None, swap_needed=None, bint use_CCP4=False): """ Unpack a mar345 compressed image :param raw: input string (bytes in python3) :param dim1,dim2: optional parameters size :param overflowPix: optional parameters: number of overflowed pixels :param version: PCK version 1 or 2 :param normal_start: position of the normal value section (can be auto-guessed) :param swap_needed: set to True when reading data from a foreign endianness (little on big or big on little) :return: ndarray of 2D with the right size """ cdef: int cdimx, cdimy, chigh, cversion, records, normal_offset, lenkey, i, stop, idx, value cnp.uint32_t[:, ::1] data cnp.uint8_t[::1] instream cnp.int32_t[::1] unpacked cnp.int32_t[:, ::1] overflow_data # handles overflows void* out end = None key1 = b"CCP4 packed image, X: " key2 = b"CCP4 packed image V2, X: " if (dim1 is None) or (dim2 is None) or \ (version not in [1, 2]) or \ (version is None) or \ (normal_start is None): start = raw.find(key2) key = key2 cversion = 2 if start == -1: start = raw.find(key1) key = key1 cversion = 1 lenkey = len(key) start = raw.index(key) + lenkey sizes = raw[start:start + 13] cdimx = < int > int(sizes[:4]) cdimy = < int > int(sizes[-4:]) normal_offset = start + 13 else: cdimx = < int > dim1 cdimy = < int > dim2 cversion = version normal_offset = normal_start if cversion == 1: lenkey = len(key1) else: lenkey = len(key2) if cversion not in [1, 2]: raise RuntimeError("Cannot determine 
the compression scheme for PCK compression (either version 1 or 2)") if (overflowPix is None) and (overflowPix is not False): end = raw.find("END OF HEADER") start = raw[:end].find("HIGH") hiLine = raw[start:end] hiLine = hiLine.split("\n")[0] word = hiLine.split() if len(word) > 1: chigh = int(word[1]) else: logger.warning("Error while looking for overflowed pixels in line %s", hiLine.strip()) chigh = 0 else: chigh = < int > overflowPix instream = numpy.fromstring(raw[normal_offset:].lstrip(), dtype=numpy.uint8) if use_CCP4: data = numpy.empty((cdimy, cdimx), dtype=numpy.uint32) with nogil: ################################################################################ # rely to whichever version of ccp4_unpack is appropriate ################################################################################ if cversion == 1: ccp4_unpack_string(&data[0, 0], &instream[0], cdimx, cdimy, 0) else: # cversion == 2: ccp4_unpack_v2_string(&data[0, 0], &instream[0], cdimx, cdimy, 0) else: # There is a bug in the mar345 implementation which performs arithmetics # of post-decompression in 16bits integers and overflows with large values unpacked = unpack_pck(instream, cdimx, cdimy).get1d() data = numpy.ascontiguousarray(postdec(unpacked, cdimx), numpy.uint32).reshape((cdimy, cdimx)) if chigh > 0: ################################################################################ # handle overflows: Each record is 8 overflow of 2x32bits integers ################################################################################ records = (chigh + PACK_SIZE_HIGH - 1) // PACK_SIZE_HIGH stop = normal_offset - lenkey - 14 odata = numpy.fromstring(raw[stop - 64 * records: stop], dtype=numpy.int32) if swap_needed: odata.byteswap(True) overflow_data = odata.reshape((-1, 2)) for i in range(overflow_data.shape[0]): idx = overflow_data[i, 0] - 1 # indexes are even values (-1 because 1 based counting) value = overflow_data[i, 1] # values are odd values if (idx >= 0) and (idx < cdimx * cdimy): 
data[idx // cdimx, idx % cdimx] = value return numpy.asarray(data) ################################################################################ # Re-Implementation of the pck compression/decompression ################################################################################ @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) @cython.initializedcheck(False) cpdef inline cnp.int32_t[::1] precomp(cnp.int16_t[::1] img, cnp.uint32_t width): """Pre-compression by subtracting the average value of the four neighbours Actually it looks a bit more complicated: * there comes the +2 from ? * the first element remains untouched * elements of the first line (+ first of second) use only former element JPA, the original author wrote: Compression is achieved by first calculating the differences between every pixel and the truncated value of four of its neighbours. For example: the difference for a pixel at img[x, y] is: comp[y, x] = img[y, x] - (img[y-1, x-1] + img[y-1, x] + img[y-1, x+1] + img[y, x-1]) / 4 This part implements overlows of int16 as the reference implementation is buggy """ cdef: cnp.uint32_t size, i cnp.int32_t[::1] comp cnp.int16_t last, cur, im0, im1, im2 size = img.size comp = numpy.zeros(size, dtype=numpy.int32) with nogil: # First pixel comp[0] = last = im0 = img[0] im1 = img[1] im2 = img[2] # First line (+ 1 pixel) for i in range(1, width + 1): cur = img[i] comp[i] = cur - last last = cur # Rest of the image for i in range(width + 1, size): cur = img[i] comp[i] = (cur - (last + im0 + im1 + im2 + 2) // 4) last = cur im0 = im1 im1 = im2 im2 = img[i - width + 2] return comp @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) @cython.initializedcheck(False) cpdef inline cnp.uint32_t[::1] postdec(cnp.int32_t[::1] comp, int width): """Post decompression by adding the average value of the four neighbours Actually it looks a bit more complicated: * there comes the +2 from ? 
* the first element remains untouched * elements of the first line (+ fist of second) use only former element JPA, the original author wrote: Compression is achieved by first calculating the differences between every pixel and the truncated value of four of its neighbours. For example: the difference for a pixel at img[x, y] is: comp[y, x] = img[y, x] - (img[y-1, x-1] + img[y-1, x] + img[y-1, x+1] + img[y, x-1]) / 4 This part implementes overlows of int16 as the reference implementation is bugged """ cdef: cnp.uint32_t size, i cnp.uint32_t[::1] img cnp.int16_t last, cur, fl0, fl1, fl2 size = comp.size img = numpy.zeros(size, dtype=numpy.uint32) with nogil: # First pixel last = comp[0] img[0] = last # First line (+ 1 pixel) for i in range(1, width + 1): img[i] = cur = comp[i] + last last = cur # Rest of the image: not parallel in this case fl0 = img[0] fl1 = img[1] fl2 = img[2] for i in range(width + 1, size): # overflow expected here. cur = comp[i] + (last + fl0 + fl1 + fl2 + 2) // 4 # ensures the data is cropped at 16 bits! img[i] = cur last = cur fl0 = fl1 fl1 = fl2 fl2 = img[i - width + 2] return img ################################################################################ # Re-Implementation of the pck compression stuff ################################################################################ @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) @cython.initializedcheck(False) cpdef inline int calc_nb_bits(any_int_t[::1] data, cnp.uint32_t start, cnp.uint32_t stop) nogil: """Calculate the number of bits needed to encode the data :param data: input data, probably slices of a larger array :param start: start position :param stop: stop position :return: the needed number of bits to store the values Comment from JPA: ................. Returns the number of bits necessary to encode the longword-array 'chunk' of size 'n' The size in bits of one encoded element can be 0, 4, 5, 6, 7, 8, 16 or 32. 
""" cdef: cnp.uint32_t size, maxsize, i, abs_data any_int_t read_data size = stop - start maxsize = 0 for i in range(start, stop): read_data = data[i] abs_data = - read_data if read_data < 0 else read_data if abs_data > maxsize: maxsize = abs_data if maxsize == 0: return 0 elif maxsize < 8: return size * 4 elif maxsize < 16: return size * 5 elif maxsize < 32: return size * 6 elif maxsize < 64: return size * 7 elif maxsize < 128: return size * 8 elif maxsize < 32768: return size * 16 else: return size * 32 @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) @cython.initializedcheck(False) def pack_image(img, bint do_precomp=True): """Pack an image into a binary compressed block :param img: input image as numpy.int16 :param do_precomp: perform the subtraction to the 4 neighbours's average. False is for testing the packing only :return: 1D array of numpy.int8 JPA wrote: .......... Pack image 'img', containing 'x * y' WORD-sized pixels into byte-stream """ cdef: cnp.uint32_t nrow, ncol, size, stream_size cnp.int16_t[::1] input_image cnp.int32_t[::1] raw PackContainer container cnp.uint32_t i, position cnp.uint32_t nb_val_packed cnp.uint32_t current_block_size, next_bock_size if do_precomp: assert len(img.shape) == 2 nrow = img.shape[0] ncol = img.shape[1] input_image = numpy.ascontiguousarray(img, dtype=numpy.int16).ravel() # pre compression: subtract the average of the 4 neighbours raw = precomp(input_image, ncol) size = nrow * ncol else: raw = numpy.ascontiguousarray(img, dtype=numpy.int32).ravel() size = raw.size # allocation of the output buffer container = PackContainer(size) position = 0 while position < size: nb_val_packed = 1 current_block_size = calc_nb_bits(raw, position, position + nb_val_packed) while ((position + nb_val_packed) < size) and (nb_val_packed < 128): if (position + 2 * nb_val_packed) < size: next_bock_size = calc_nb_bits(raw, position + nb_val_packed, position + 2 * nb_val_packed) else: break if 2 * 
cdef class PackContainer:
    # Growable byte stream with sub-byte write position:
    #   position -- index of the byte currently being filled
    #   offset   -- number of bits already used inside that byte
    #   allocated -- current capacity of `data`
    cdef:
        readonly cnp.uint32_t position, offset, allocated
        cnp.uint8_t[::1] data

    def __cinit__(self, cnp.uint32_t size=4096):
        """Constructor of the class

        :param size: start size of the array
        """
        self.position = 0
        self.offset = 0
        self.allocated = size
        self.data = numpy.zeros(self.allocated, dtype=numpy.uint8)

    def __dealloc__(self):
        self.data = None

    def get(self):
        """Retrieve the populated part of the stream as a numpy array."""
        # A partially filled last byte must be included in the output
        end = self.position + 1 if self.offset else self.position
        return numpy.asarray(self.data[:end])

    @cython.boundscheck(False)
    @cython.wraparound(False)
    @cython.cdivision(True)
    @cython.initializedcheck(False)
    cpdef append(self, cnp.int32_t[::1] data, cnp.uint32_t position, cnp.uint32_t nb_val, cnp.uint32_t block_size):
        """Append a block of data[position: position+nb_val] to the compressed
        stream. Only the most significant bits are taken.

        :param data: input uncompressed image as 1D array
        :param position: start position of reading of the image
        :param nb_val: number of value from data to pack in the block
        :param block_size: number of bits for the whole block

        The 6 bits header is managed here as well as the stream resizing.
        """
        cdef:
            cnp.uint32_t offset, index, i, bit_per_val, nb_bytes
            cnp.uint64_t tmp, tostore, mask
            cnp.int64_t topack
        bit_per_val = block_size // nb_val

        # Grow the backing buffer when the incoming block may not fit
        nb_bytes = (CCP4_PCK_BLOCK_HEADER_LENGTH + block_size + 7) // 8
        if self.position + nb_bytes >= self.allocated:
            self.allocated *= 2
            new_stream = numpy.zeros(self.allocated, dtype=numpy.uint8)
            if self.offset:
                new_stream[:self.position + 1] = self.data[:self.position + 1]
            else:
                new_stream[:self.position] = self.data[:self.position]
            self.data = new_stream

        with nogil:
            # Splice the 6-bit block header onto the current partial byte
            tmp = 0 if self.offset == 0 else self.data[self.position]
            tmp |= pack_nb_val(nb_val, bit_per_val) << self.offset
            self.offset += CCP4_PCK_BLOCK_HEADER_LENGTH
            self.data[self.position] = tmp & (255)
            if self.offset >= 8:
                self.position += 1
                self.offset -= 8
                self.data[self.position] = (tmp >> 8) & (255)

            # Pack every value of the input slice, sign-and-magnitude style:
            # low (bit_per_val - 1) bits carry the magnitude, top bit the sign
            for i in range(nb_val):
                topack = data[position + i]
                mask = ((1 << (bit_per_val - 1)) - 1)
                tmp = (topack & mask)
                if topack < 0:
                    # handle the sign
                    tmp |= 1 << (bit_per_val - 1)
                # Continue filling the current partial byte
                tostore = 0 if self.offset == 0 else self.data[self.position]
                tostore |= tmp << self.offset
                self.offset += bit_per_val
                # Flush complete bytes into the stream
                self.data[self.position] = tostore & (255)
                while self.offset >= 8:
                    tostore = tostore >> 8
                    self.offset -= 8
                    self.position += 1
                    self.data[self.position] = tostore & (255)


cpdef inline cnp.uint8_t pack_nb_val(cnp.uint8_t nb_val, cnp.uint8_t value_size) nogil:
    """Calculate the header to be stored in 6 bits

    :param nb_val: number of values to be stored: must be a power of 2 <= 128
    :param value_size: can be 0, 4, 5, 6, 7, 8, 16 or 32, the number of bits
        per value
    :return: the header as an unsigned char
    """
    cdef:
        cnp.uint32_t value, i
    value = 0
    # 3 lsb: log2 of the number of packed values
    for i in range(8):
        if (nb_val >> i) == 1:
            value |= i
            break
    # 3 msb: encoded per-value bit size (CCP4_PCK_BLOCK_HEADER_LENGTH >> 1 == 3)
    value |= (CCP4_BITSIZE[value_size]) << (CCP4_PCK_BLOCK_HEADER_LENGTH >> 1)
    return value


################################################################################
# Re-Implementation of the pck uncompression stuff
################################################################################
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
@cython.initializedcheck(False)
cpdef UnpackContainer unpack_pck(cnp.uint8_t[::1] stream, int ncol, int nrow):
    """Unpack the raw stream and return the image

    V1 only for now, V2 may be added later

    :param stream: raw input stream
    :param ncol: number of columns in the image (i.e width)
    :param nrow: number if rows in the image (i.e. height)
    :return: Container with decompressed image
    """
    cdef:
        cnp.uint32_t offset            # Number of bit to offset in the current byte
        cnp.uint32_t pos, end_pos      # current position and last position of block in byte stream
        cnp.uint32_t size              # size of the input stream
        cnp.int32_t value, next       # integer values
        cnp.uint32_t nb_val_packed, nb_bit_per_val, nb_bit_in_block
        UnpackContainer cont           # Container with unpacked data

    cont = UnpackContainer(ncol, nrow)
    size = stream.size

    # Luckily we start at byte boundary
    offset = 0
    pos = 0

    while pos < (size) and cont.position < (cont.size):
        # Fetch the 6-bit block header, which may straddle a byte boundary
        value = stream[pos]
        if offset > (8 - CCP4_PCK_BLOCK_HEADER_LENGTH):
            # wrap around
            pos += 1
            next = stream[pos]
            value |= next << 8
            value = value >> offset
            offset += CCP4_PCK_BLOCK_HEADER_LENGTH - 8
        elif offset == (8 - CCP4_PCK_BLOCK_HEADER_LENGTH):
            # Exactly on the boundary
            value = value >> offset
            pos += 1
            offset = 0
        else:
            # stay in same byte
            value = value >> offset
            offset += CCP4_PCK_BLOCK_HEADER_LENGTH

        # 3 lsb: log2 of the number of packed values (mask 7 == 0b111)
        nb_val_packed = 1 << (value & 7)
        # next 3 bits: look-up table for the size of each element in block
        nb_bit_per_val = CCP4_PCK_BIT_COUNT[(value >> 3) & 7]

        if nb_bit_per_val == 0:
            cont.set_zero(nb_val_packed)
        else:
            nb_bit_in_block = nb_bit_per_val * nb_val_packed
            cont.unpack(stream, pos, offset, nb_val_packed, nb_bit_per_val)
            offset += nb_bit_in_block
            pos += offset // 8
            offset %= 8
    return cont


cdef class UnpackContainer:
    # Flat int32 destination buffer for the decompressed image:
    #   position -- next write index, size == nrow * ncol
    cdef:
        readonly cnp.uint32_t nrow, ncol, position, size
        readonly cnp.int32_t[::1] data

    def __cinit__(self, int ncol, int nrow):
        self.nrow = nrow
        self.ncol = ncol
        self.size = nrow * ncol
        self.data = numpy.zeros(self.size, dtype=numpy.int32)
        self.position = 0

    def __dealloc__(self):
        self.data = None

    def get(self):
        """Retrieve the populated array reshaped to (nrow, ncol)."""
        return numpy.asarray(self.data).reshape((self.nrow, self.ncol))

    cpdef cnp.int32_t[::1] get1d(self):
        """Retrieve the populated array as a flat memoryview."""
        return self.data

    cpdef set_zero(self, int number):
        """Skip `number` positions, leaving them at their zero default."""
        self.position += number

    @cython.boundscheck(False)
    @cython.wraparound(False)
    @cython.cdivision(True)
    @cython.initializedcheck(False)
    cpdef unpack(self, cnp.uint8_t[::1] stream, cnp.uint32_t pos, cnp.uint32_t offset, cnp.uint32_t nb_value, cnp.uint32_t value_size):
        """Unpack a block of data, all values having the same size

        :param stream: input stream, already sliced
        :param pos: byte position in the stream
        :param offset: number of bits of offset, at the begining of the stream
        :param nb_value: number of values to unpack
        :param value_size: number of bits of each value
        """
        cdef:
            cnp.uint32_t i, j          # simple counters
            cnp.uint32_t new_offset    # position after read
            cnp.int64_t cur, tmp2     # value to be stored
            cnp.uint64_t tmp           # under construction: needs to be unsigned
            int to_read                # number of bytes to read
        with nogil:
            cur = 0
            for i in range(nb_value):
                # gather as many bytes as the value needs into tmp
                tmp = stream[pos] >> offset
                new_offset = value_size + offset
                to_read = (new_offset + 7) // 8
                for j in range(1, to_read):
                    tmp |= (stream[pos + j]) << (8 * j - offset)
                # keep only the value_size least-significant bits
                cur = tmp & ((1 << (value_size)) - 1)
                # sign-extend when the most significant bit is set
                if cur >> (value_size - 1):
                    cur |= (-1) << (value_size - 1)
                self.data[self.position] = cur
                self.position += 1
                # advance the byte/bit cursor past the consumed value
                pos = pos + new_offset // 8
                offset = new_offset % 8

# coding: utf-8
#
#    Project: X-ray image reader
#             https://github.com/silx-kit/fabio
#
#    Copyright (C) 2015 European Synchrotron Radiation Facility, Grenoble, France
#
#    Principal author:       Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

"""Cif parser helper functions"""

__author__ = "Jerome Kieffer"
__contact__ = "jerome.kieffer@esrf.eu"
__license__ = "MIT"
__copyright__ = "2014, European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "25/07/2017"

cimport numpy
import numpy
import cython
:param bytes_text: the content of the CIF - file :type bytes_text: 8-bit string (str in python2 or bytes in python3) :return: list of all the fields of the CIF :rtype: list """ cdef: unsigned char[:] ary = bytearray(bytes_text) bint in_comment = False, in_single_quote = False bint in_double_quote = False, multiline = False, go_on = True int i = -1, start = -1, end = -1, imax char prev, next, cur = b"\n" bytes EOL = b'\r\n' bytes BLANK = b" \t\r\n" unsigned char SINGLE_QUOTE = b"'" unsigned char DOUBLE_QUOTE = b'"' unsigned char SEMICOLUMN = b';' unsigned char HASH = b"#" unsigned char UNDERSCORE = b"_" unsigned char DASH = b"-" unsigned char QUESTIONMARK = b"?" bytes BINARY_MARKER = b"--CIF-BINARY-FORMAT-SECTION--" int lbms = len(BINARY_MARKER) next = ary[0] imax = len(bytes_text) - 1 fields = [] while go_on: i += 1 prev = cur cur = next if i < imax: next = ary[i + 1] else: next = b"\n" go_on = False # print(i,chr(prev),chr(cur),chr(next),in_comment,in_single_quote,in_double_quote,multiline, start, cur ==SINGLE_QUOTE) # Skip comments if in_comment: if cur in EOL: in_comment = False continue if prev in EOL: if cur == HASH: in_comment = True continue if cur == SEMICOLUMN: if multiline: fields.append(bytes_text[start:i].strip()) start = -1 multiline = False else: multiline = True start = i + 1 continue if multiline: # Handle CBF if cur == DASH: if bytes_text[i:i + lbms] == BINARY_MARKER: end = bytes_text[i + lbms:].find(BINARY_MARKER) i += end + 2 * lbms cur = ary[i] next = ary[i + 1] continue # Handle single quote if cur == SINGLE_QUOTE: if (not in_single_quote) and (not in_double_quote) and (start < 0) and (prev in BLANK): start = i + 1 in_single_quote = True continue if (in_single_quote) and (not in_double_quote) and (start >= 0) and (next in BLANK): fields.append(bytes_text[start:i].strip()) start = -1 in_single_quote = False continue if in_single_quote: continue # Handle double quote if cur == DOUBLE_QUOTE: if (not in_single_quote) and (not in_double_quote) and 
(start < 0) and (prev in BLANK): start = i + 1 in_double_quote = True continue if (not in_single_quote) and (in_double_quote) and (start >= 0) and (next in BLANK): fields.append(bytes_text[start:i].strip()) start = -1 in_double_quote = False continue if in_double_quote: continue # Normal fields if cur in BLANK: if start >= 0: fields.append(bytes_text[start:i].strip()) start = -1 else: if start < 0: start = i if start >= 0: fields.append(bytes_text[start:].strip()) return fields fabio-0.6.0/fabio/ext/byte_offset.pyx0000644001611600070440000003222313227357030020647 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # Copyright (C) 2015 European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ Cif Binary Files images are 2D images written by the Pilatus detector and others. 
They use a modified (simplified) byte-offset algorithm. This file contains the decompression function from a string to an int64 numpy array. """ __author__ = "Jerome Kieffer" __contact__ = "jerome.kieffer@esrf.eu" __license__ = "MIT" __copyright__ = "2010-2016, European Synchrotron Radiation Facility, Grenoble, France" __date__ = "11/08/2017" cimport numpy import numpy import cython @cython.boundscheck(False) @cython.wraparound(False) def comp_cbf32(data not None): """Compress a dataset using the byte-offset described for Pilatus :param data: array of integers :return: numpy array of chars """ cdef: numpy.int32_t[::1] ary = numpy.ascontiguousarray(data.ravel(), dtype=numpy.int32) int size = ary.size, i = 0, j = 0 numpy.int8_t[::1] output = numpy.zeros(size * 7, dtype=numpy.int8) numpy.int32_t last, current, delta, absdelta last = 0 for i in range(size): current = ary[i] delta = current - last absdelta = delta if delta > 0 else -delta if absdelta >= 1 << 15: output[j] = -128 output[j + 1] = 0 output[j + 2] = -128 output[j + 3] = (delta & 255) output[j + 4] = (delta >> 8) & 255 output[j + 5] = (delta >> 16) & 255 output[j + 6] = (delta >> 24) j += 7 elif absdelta >= 1 << 7: output[j] = -128 output[j + 1] = delta & 255 output[j + 2] = (delta >> 8) & 255 j += 3 else: output[j] = delta j += 1 last = current return numpy.asarray(output)[:j] @cython.boundscheck(False) @cython.wraparound(False) def comp_cbf(data not None): """Compress a dataset using the byte-offset described for any int64 :param data: array of integers :return: numpy array of chars """ cdef: numpy.int64_t[::1] ary = numpy.ascontiguousarray(data.ravel(), dtype=numpy.int64) int size = ary.size, i = 0, j = 0 numpy.int8_t[::1] output = numpy.zeros(size * 15, dtype=numpy.int8) numpy.int64_t last, current, delta, absdelta last = 0 for i in range(size): current = ary[i] delta = current - last absdelta = delta if delta > 0 else -delta if absdelta >= 1 << 31: output[j] = -128 output[j + 1] = 0 output[j + 2] = -128 
output[j + 3] = 0 output[j + 4] = 0 output[j + 5] = 0 output[j + 6] = -128 output[j + 7] = (delta & 255) output[j + 8] = (delta >> 8) & 255 output[j + 9] = (delta >> 16) & 255 output[j + 10] = (delta >> 24) & 255 output[j + 11] = (delta >> 32) & 255 output[j + 12] = (delta >> 40) & 255 output[j + 13] = (delta >> 48) & 255 output[j + 14] = (delta >> 56) & 255 j += 15 elif absdelta >= 1 << 15: output[j] = -128 output[j + 1] = 0 output[j + 2] = -128 output[j + 3] = (delta & 255) output[j + 4] = (delta >> 8) & 255 output[j + 5] = (delta >> 16) & 255 output[j + 6] = (delta >> 24) j += 7 elif absdelta >= 1 << 7: output[j] = -128 output[j + 1] = delta & 255 output[j + 2] = (delta >> 8) & 255 j += 3 else: output[j] = delta j += 1 last = current return numpy.asarray(output)[:j] @cython.boundscheck(False) @cython.wraparound(False) def dec_cbf(bytes stream not None, size=None): """ Analyze a stream of char with any length of exception (2,4, or 8 bytes integers) :param stream: bytes (string) representing the compressed data :param size: the size of the output array (of longInts) :return: int64 ndArrays """ cdef: int i = 0 int j = 0 numpy.uint8_t tmp8 = 0 numpy.int64_t last = 0 numpy.int64_t current = 0 numpy.int64_t tmp64 = 0 numpy.int64_t tmp64a = 0 numpy.int64_t tmp64b = 0 numpy.int64_t tmp64c = 0 numpy.int64_t tmp64d = 0 numpy.int64_t tmp64e = 0 numpy.int64_t tmp64f = 0 numpy.int64_t tmp64g = 0 numpy.uint8_t key8 = 0x80 numpy.uint8_t key0 = 0x00 int csize int lenStream = < int > len(stream) numpy.uint8_t[:] cstream = bytearray(stream) if size is None: csize = lenStream else: csize = < int > size cdef numpy.ndarray[numpy.int64_t, ndim = 1] dataOut = numpy.empty(csize, dtype=numpy.int64) with nogil: while (i < lenStream) and (j < csize): if (cstream[i] == key8): if ((cstream[i + 1] == key0) and (cstream[i + 2] == key8)): if (cstream[i + 3] == key0) and (cstream[i + 4] == key0) and (cstream[i + 5] == key0) and (cstream[i + 6] == key8): # Retrieve the interesting Bytes of data 
tmp64g = cstream[i + 7] tmp64f = cstream[i + 8] tmp64e = cstream[i + 9] tmp64d = cstream[i + 10] tmp64c = cstream[i + 11] tmp64b = cstream[i + 12] tmp64a = cstream[i + 13] tmp64 = cstream[i + 14] # Assemble data into a 64 bits integer current = (tmp64 << 56) | (tmp64a << 48) | (tmp64b << 40) | (tmp64c << 32) | (tmp64d << 24) | (tmp64e << 16) | (tmp64f << 8) | (tmp64g) i += 15 else: # Retrieve the interesting Bytes of data tmp64c = cstream[i + 3] tmp64b = cstream[i + 4] tmp64a = cstream[i + 5] tmp64 = cstream[i + 6] # Assemble data into a 64 bits integer current = (tmp64 << 24) | (tmp64a << 16) | (tmp64b << 8) | (tmp64c) i += 7 else: tmp64a = cstream[i + 1] tmp64 = cstream[i + 2] current = (tmp64 << 8) | (tmp64a) i += 3 else: current = ( cstream[i]) i += 1 last += current dataOut[j] = last j += 1 return dataOut[:j] @cython.boundscheck(False) def dec_cbf32(bytes stream not None, size=None): """ Analyze a stream of char with any length of exception (2 or 4 bytes integers) Optimized for int32 decompression :param stream: bytes (string) representing the compressed data :param size: the size of the output array (of longInts) :return: int64 ndArrays """ cdef: int i = 0 int j = 0 numpy.uint8_t tmp8 = 0 numpy.int32_t last = 0 numpy.int32_t current = 0 numpy.int32_t tmp64 = 0 numpy.int32_t tmp64a = 0 numpy.int32_t tmp64b = 0 numpy.int32_t tmp64c = 0 numpy.uint8_t key8 = 0x80 numpy.uint8_t key0 = 0x00 int csize int lenStream = < int > len(stream) numpy.uint8_t[:] cstream = bytearray(stream) if size is None: csize = lenStream else: csize = < int > size cdef numpy.ndarray[numpy.int32_t, ndim = 1] dataOut = numpy.empty(csize, dtype=numpy.int32) with nogil: while (i < lenStream) and (j < csize): if (cstream[i] == key8): if ((cstream[i + 1] == key0) and (cstream[i + 2] == key8)): # Retrieve the interesting Bytes of data tmp64c = cstream[i + 3] tmp64b = cstream[i + 4] tmp64a = cstream[i + 5] tmp64 = cstream[i + 6] # Assemble data into a 32 bits integer current = (tmp64 << 24) | 
(tmp64a << 16) | (tmp64b << 8) | (tmp64c) i += 7 else: tmp64a = cstream[i + 1] tmp64 = cstream[i + 2] current = (tmp64 << 8) | (tmp64a) i += 3 else: current = ( cstream[i]) i += 1 last += current dataOut[j] = last j += 1 return dataOut[:j] @cython.boundscheck(False) def dec_TY5(bytes stream not None, size=None): """ Analyze a stream of char with a TY5 compression scheme and exception (2 or 4 bytes integers) TODO: known broken, FIXME :param stream: bytes (string) representing the compressed data :param size: the size of the output array (of longInts) :return: int32 ndArrays """ cdef: int i = 0 int j = 0 numpy.int32_t last = 0 numpy.int32_t current = 0 # numpy.uint8_t tmp8 = 0 numpy.uint8_t key8 = 0xfe # 127+127 numpy.int32_t tmp32a = 0 numpy.int32_t tmp32b = 0 # numpy.int32_t tmp32c = 0 # numpy.int32_t tmp32d = 0 int csize int lenStream = len(stream) numpy.uint8_t[:] cstream = bytearray(stream) if size is None: csize = lenStream else: csize = < int > size cdef numpy.ndarray[numpy.int32_t, ndim = 1] dataOut = numpy.zeros(csize, dtype=numpy.int32) if True: while (i < lenStream) and (j < csize): if (cstream[i] == key8): tmp32a = cstream[i + 1] - 127 tmp32b = ( cstream[i + 2] << 8 ) # print(tmp32a, tmp32b, (tmp32b|tmp32a)) current = (tmp32b) | (tmp32a) i += 3 else: current = ( cstream[i]) - 127 i += 1 last += current dataOut[j] = last j += 1 return dataOut[:j] # # determines the current position in the bitstream # position=headersize+columnnumber*row+column+offset # value=float(file[position])-127 # if value<127: # # this is the normal case # # two bytes encode one pixel # basevalue=value+basevalue # data[row][column]=basevalue # elif value==127: # # this is the special case 1 # # if the marker 127 is found the next four bytes encode one pixel # if float(file[position+2]) < 127: # # resulting value is positive # value=(float(file[position+1]))+255*(float(file[position+2])) # elif float(file[position+2]) > 127: # # resulting value is negative # 
value=float(file[position+1])+255*(float(file[position+2])-256) # basevalue=value+basevalue # data[row][column]=basevalue # offset=offset+2 # if float(file[position+0])+float(file[position+1])==510: # # this is the special case 1 # # i do not know what is going on # print('special case i can not explain.') # offset=offset+8 # if basevalue > 500: # # just a way to cut off very high intensities # data[row][column]=500 fabio-0.6.0/fabio/ext/src/0000755001611600070440000000000013227375744016375 5ustar kiefferscisoft00000000000000fabio-0.6.0/fabio/ext/src/cf_iomodule.c0000644001611600070440000000450413227357030021015 0ustar kiefferscisoft00000000000000#include #include #include #include #include "columnfile.h" static PyObject *cf_read(PyObject *self, PyObject *args, PyObject *keywds){ cf_data *cf__; PyArrayObject *py_data; PyStringObject *str; PyListObject *clabels; static char *kwlist[]={"file","mode",NULL}; const char mode[]="a "; unsigned int flags=0; /* perhaps not const */ int dim1,dim2,ocount; int dims[2]; int i; FILE *file; PyObject *py_file; if (!PyArg_ParseTupleAndKeywords(args,keywds,"O|s",kwlist,&py_file,&mode)) return NULL; file=PyFile_AsFile(py_file); if (strchr(mode,'z')){ flags|=CF_GZ_COMP; } if(strchr(mode,'b')){ cf__=(cf_data *) cf_read_bin(file,NULL,flags); }else if (strchr(mode,'a')) { cf__=(cf_data *) cf_read_ascii(file,NULL,flags); }else{ fprintf(stderr,"unrecognized mode for columnfile %s (assuming ascii)\n",mode); cf__= (cf_data *)cf_read_ascii(file,NULL,flags); } /*check for failure to read*/ if (cf__==NULL){ return Py_BuildValue("OO",Py_None,Py_None); } dims[0]=cf__->nrows;dims[1]=cf__->ncols; /*since data may be non-contigous we can't simply create a numpy-array from cf__->data, as Numpy's memory model prohibits it*/ /*i.e. 
py_data=(PyArrayObject*)PyArray_SimpleNewFromData(2, dims, NPY_DOUBLE, (void*)(&(cf__->data[0][0]))); * won't work*/ py_data=(PyArrayObject *)PyArray_SimpleNew(2,dims,NPY_DOUBLE); for (i=0;inrows;i++){ memcpy((double *)PyArray_GETPTR2(py_data,i,0),cf__->data[i],cf__->ncols*sizeof(double)); } clabels=(PyListObject *)PyList_New(0); for (i=0;incols;i++){ str = (PyStringObject*)PyString_FromString(cf__->clabels[i]); if (PyList_Append((PyObject*)clabels,(PyObject*)str)){ fprintf(stderr,"cannot insert column label %d\n",i); } } cf_free(cf__); return Py_BuildValue("OO", PyArray_Return(py_data),clabels); } static PyMethodDef cf_io_Methods[] = { {"read",(PyCFunction)cf_read, METH_VARARGS | METH_KEYWORDS, "call the c-columnfile reading interface. The mode keyword argument is either:\n \"a\" for ascii (the default)\n \"b\" for binary"}, {NULL, NULL, 0, NULL} }; PyMODINIT_FUNC initcf_io(void) { (void) Py_InitModule("cf_io",cf_io_Methods); import_array(); if (PyErr_Occurred()) Py_FatalError("cannot initialize cf_iomodule.c"); } fabio-0.6.0/fabio/ext/src/columnfile.c0000644001611600070440000001352313227357030020666 0ustar kiefferscisoft00000000000000#include #include #include #ifndef HAVE_ZLIB_H #define HAVE_ZLIB_H 0 #else #include #endif #include "columnfile.h" static char hdr_ctl[]="# %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s"; int compression_yes(char *fname){ /*should we use compression*/ char *p; if ( HAVE_ZLIB_H && (p=strstr(fname,".gz"))!=NULL && pnralloc;i++){ if (p->data[i]!=NULL) free(p->data[i]); } if( p->data!=NULL){free(p->data);} for (i=0;incols;i++){ if(p->clabels[i]!=NULL) free(p->clabels[i]); } if(p->clabels!=NULL){free(p->clabels);} free(p); } } int cf_write(char *fname,void *cf_handle, unsigned int FLAGS){ int status; #if HAVE_ZLIB_H if (FLAGS & CF_GZ_COMP){ gzFile gzfp=gzopen(fname,"wbh"); if (gzfp==NULL) return -1; status=-1; if (FLAGS && CF_BIN){ status=cf_write_bin_gz(gzfp,cf_handle); }else{ 
status=cf_write_ascii_gz(gzfp,cf_handle); } gzclose(gzfp); return status; }else{ #else if(1){ #endif FILE *fp=fopen(fname,"wb"); if (fp==NULL) return -1; status=-1; if (FLAGS && CF_BIN){ /*status=cf_write_bin(fp,cf_handle); */ }else{ status=cf_write_ascii(fp,cf_handle,0); } fclose(fp); return status; } } int cf_write_ascii(void *fp, void *cf_handle, unsigned int FLAGS){/*{{{*/ int r,c; cf_data *cf_=(cf_data *) cf_handle; #if HAVE_ZLIB_H if (FLAGS & CF_GZ_COMP){ gzprintf((gzFile)fp,"#"); for (i=0;incols;i++){ gzprintf((gzFile)fp," %s",cf_->clabels[i]); } gzprintf((gzFile)fp,"\n"); for (r=0;rnrows;r++){ for (i=0;incols;i++){ gzprintf((gzFile)fp," %g",cf_->data[i][r]); } gzprintf((gzFile)fp,"\n"); } return 0; }else{ #endif fprintf((FILE *)fp,"#"); for (c=0;cncols;c++){ fprintf((FILE *)fp," %s",cf_->clabels[c]); } fprintf((FILE *)fp,"\n"); for (r=0;rnrows;r++){ for (c=0;cncols;c++){ fprintf((FILE *)fp," %g",cf_->data[c][r]); } fprintf((FILE *)fp,"\n"); } return 0; #if HAVE_ZLIB_H } #endif }/*}}}*/ void *cf_read_ascii(void *fp, void *dest, unsigned int FLAGS){/*{{{*/ /*read the first line and figure out how many columns we have*/ char line[2048]; int i,r; int nr_alloc=CF_INIT_ROWS; int nc_alloc=CF_INIT_COLS; int ncols; char **clabels,**cp; double **data,**dp; char *p; cf_data *dest_local; /*read the first line into buffer*/ #if HAVE_ZLIB_H if (FLAGS & CF_GZ_COMP){ if ((gzgets((gzFile )fp,line,2048))==Z_NULL) {fprintf(stderr,"zlib io error in %s \n",__FILE__);return NULL;} }else{ if((fgets(line,2048,(FILE *)fp))==NULL){fprintf(stderr,"io-error in %s\n",__FILE__);return NULL;} } #else if((fgets(line,2048,(FILE *)fp))==NULL){fprintf(stderr,"io-error in %s\n",__FILE__);return NULL;} #endif /*initially allocate room for 32 columns - if that is not enough should reallocate*/ clabels=(char**) malloc(CF_INIT_COLS* sizeof(char*)); for (cp=clabels;cpnon-ws slopes. when one is found read from pc-1 into header storage. 
exit when line is exhausted*/ /*count the number of entries*/ ncols=0; /*headers are supposed to start with # so skip that*/ if (*line=='#') p=line+1; else p=line; while (*p!='\0' || *p!='\n' || pncols=ncols; ((cf_data *) dest_local)->nrows=r; ((cf_data *) dest_local)->nralloc=nr_alloc; ((cf_data *) dest_local)->clabels=clabels; ((cf_data *) dest_local)->data=data; return (void *) dest_local; }/*}}}*/ void *cf_read_bin(void *fp, void *dest, unsigned int FLAGS){ return NULL; } fabio-0.6.0/fabio/ext/src/ccp4_pack.c0000644001611600070440000007222513227357030020364 0ustar kiefferscisoft00000000000000/* Fabio Mar345 ccp4_pack decompressor Copyright (C) 2007-2009 Henning O. Sorensen & Erik Knudsen 2012 ESRF This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* part of this code is freely adaped from pack_c.c from CCP4 distribution * (which is also LGPL). The original author is Jan Pieter Abrahams * jpa@mrc-lmb.cam.ac.uk This file contains functions capable of compressing and decompressing images. It is especially suited for X-ray diffraction patterns, or other image formats in which orthogonal pixels contain "grey-levels" and vary smoothly accross the image. Clean images measured by a MAR-research image plate scanner containing two bytes per pixel can be compressed by a factor of 3.5 to 4.5 . 
Since the images are encoded in a byte-stream, there should be no problem concerning big- or little ended machines: both will produce an identical packed image. Compression is achieved by first calculating the differences between every pixel and the truncated value of four of its neighbours. For example: the difference for a pixel at img[x, y] is: img[x, y] - (int) (img[x-1, y-1] + img[x-1, y] + img[x-1, y+1] + img[x, y-1]) / 4 After calculating the differences, they are encoded in a packed array. A packed array consists of consequitive chunks which have the following format: - Three bits containing the logarithm base 2 of the number of pixels encoded in the chunk. - Three bits defining the number of bits used to encode one element of the chunk. The value of these three bits is used as index in a lookup table to get the actual number of bits of the elements of the chunk. Note: in version 2, there are four bits in this position!! This allows more efficient packing of synchrotron data! The routines in this sourcefile are backwards compatible. JPA, 26 June 1995 - The truncated pixel differences. To compress an image, call pack_wordimage_c() or pack_longimage_c(). These will append the packed image to any header information already written to disk (take care that the file containing this information is closed before calling). To decompress an image, call readpack_word_c() or readpack_long_c(). These functions will find the start of the packed image themselves, irrespective of the header format. 
Jan Pieter Abrahams, 6 Jan 1993 */ #include #include "string.h" #include "assert.h" /*array translating the number of errors per block*/ static unsigned int CCP4_PCK_ERR_COUNT[] = {1,2,4,8,16,32,64,128}; /*array translating the number of bits per error*/ static unsigned int CCP4_PCK_BIT_COUNT[]= {0,4,5,6,7,8,16,32}; /*array translating the number of errors per block - can use shifts as well actually*/ static unsigned int CCP4_PCK_ERR_COUNT_V2[] = {1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768}; /*array translating the number of bits per error*/ static unsigned int CCP4_PCK_BIT_COUNT_V2[]= {0,4,5,6,7,8,9,10,11,12,13,14,15,16,32}; static const unsigned char CCP4_PCK_MASK[]={0x00, 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3F, 0x7F, 0xFF}; static const unsigned int CCP4_PCK_MASK_16[]={0x00, 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF}; static const unsigned long CCP4_PCK_MASK_32[]={0x00, 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF, 0x3FFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF}; void *mar345_read_data_string(char *instring, int ocount, int dim1, int dim2){ // first process overflow bytes - for now we just ignore them // * these are stored in 64 byte records int orecords=(int)(ocount/8.0+0.875); int *odata,x,y,version=0; char *c,cbuffer[64]=""; char *t_; unsigned int *unpacked_array; odata=(int*)malloc(64*8*orecords); if (!odata) return NULL; memcpy(odata, instring, 64*orecords); t_ = instring + (64*orecords); // there is no stdout in a gui, sorry // now after they have been read find the CCP4.....string and compare to dim1 c=cbuffer; while((*c)!=EOF){ if (c==cbuffer+63){ c=cbuffer; } *c = (char) *t_; t_++; // set the next character to a \0 so the string is always terminated *(c+1)='\0'; if (*c=='\n'){ // check for the 
CCP- string x=y=0; sscanf(cbuffer,"CCP4 packed image, X: %04d, Y: %04d", &x,&y); if (x==dim1 || y ==dim2){ version=1; break; } x=y=0; sscanf(cbuffer,"CCP4 packed image V2, X: %04d, Y: %04d", &x,&y); if (x==dim1 || y ==dim2){ version=2; break; } c=cbuffer; } else c++; } // allocate memory for the arrays unpacked_array=(unsigned int*) malloc(sizeof(unsigned int)*dim1*dim2); if (!unpacked_array) return NULL; // relay to whichever version of ccp4_unpack is appropriate switch(version){ case 1: ccp4_unpack_string(unpacked_array,(void*)t_,dim1,dim2,0); break; case 2: ccp4_unpack_v2_string(unpacked_array,(void*)t_,dim1,dim2,0); break; default: return NULL; } // handle overflows while (ocount>0){ unsigned int adress,value; adress=odata[2*ocount-2]; if (adress){ value=odata[2*ocount-1]; // adresses start at 1 unpacked_array[adress-1]=value; } ocount--; } return unpacked_array; } // Henri start modif // void* mar345_read_data_2(const char* pFilePath, int ocount, int dim1, int dim2){ // FILE* f = fopen(pFilePath, "r"); // assert(f); // if(!f){ // printf("can't find file %s. Unable to read mar345 data\n", pFilePath); // return NULL; // } // void* res = mar345_read_data(f, ocount, dim1, dim2); // fclose(f); // return res; // } // Henri end modif // *unpack a new style mar345 image a'la what is done in CBFlib // * assumes the file is already positioned after the ascii header // * Perhaps the positioning should be done here as well. 
void * mar345_read_data(FILE *file, int ocount, int dim1, int dim2){ // first process overflow bytes - for now we just ignore them // these are stored in 64 byte records int orecords=(int)(ocount/8.0+0.875); int *odata,x,y,version=0; char *c,cbuffer[64]=""; unsigned int *unpacked_array; odata=(int*)malloc(64*8*orecords); if (!odata) return NULL; pfail_nonzero (orecords-fread(odata,64,orecords,file)); // there is no stdout in a gui, sorry // now after they have been read find the CCP4.....string and compare to dim1 c=cbuffer; while((*c)!=EOF){ if (c==cbuffer+63){ c=cbuffer; } *c=(char)getc(file); // set the next character to a \0 so the string is always terminated *(c+1)='\0'; if (*c=='\n'){ // check for the CCP- string x=y=0; sscanf(cbuffer,"CCP4 packed image, X: %04d, Y: %04d", &x,&y); if (x==dim1 || y ==dim2){ version=1; break; } x=y=0; sscanf(cbuffer,"CCP4 packed image V2, X: %04d, Y: %04d", &x,&y); if (x==dim1 || y ==dim2){ version=2; break; } c=cbuffer; } else c++; } // allocate memory for the arrays unpacked_array=(unsigned int*) malloc(sizeof(unsigned int)*dim1*dim2); if (!unpacked_array) return NULL; // relay to whichever version of ccp4_unpack is appropriate switch(version){ case 1: ccp4_unpack(unpacked_array,(void*)file,dim1,dim2,0); break; case 2: ccp4_unpack_v2(unpacked_array,(void*)file,dim1,dim2,0); break; default: return NULL; } // handle overflows while (ocount>0){ unsigned int adress,value; adress=odata[2*ocount-2]; if (adress){ value=odata[2*ocount-1]; // adresses start at 1 unpacked_array[adress-1]=value; } ocount--; } return unpacked_array; } // *unpack a ccp4-style packed array into the memory location pointed to by unpacked_array // * if this is null allocate memory and return a pointer to it // * \return NULL if unsuccessful // * TODO change this to read directly from the FILE to not waste memory void * ccp4_unpack( void *unpacked_array, void *packed, size_t dim1,size_t dim2, size_t max_num_int ){ uint8_t t_,t2,_conv; int 
err_val,bit_offset,num_error=0,num_bits=0,read_bits; int i; int x4,x3,x2,x1; unsigned int *int_arr=(unsigned int *) unpacked_array; FILE *instream=(FILE *)packed; // if no maximum integers are give read the whole nine yards if (max_num_int==0){ max_num_int=dim1*dim2; } // if a NULL pointer is passed allocate some new memory if (unpacked_array==NULL){ if ( (unpacked_array=malloc(sizeof(unsigned int)*max_num_int))==NULL){ errno=ENOMEM; return NULL; } } // packed bits always start at byte boundary after header bit_offset=0; // read the first byte of the current_block t_=(unsigned char)fgetc(instream); // while less than num ints have been unpacked i=0; while(i=(8-CCP4_PCK_BLOCK_HEADER_LENGTH)){ // we'll be reading past the next byte boundary t2=(unsigned char ) fgetc(instream); t_=(t_>>bit_offset) + ((unsigned char)t2 <<(8-bit_offset) ); num_error=CCP4_PCK_ERR_COUNT[t_ & CCP4_PCK_MASK[3]]; num_bits=CCP4_PCK_BIT_COUNT[(t_>>3) & CCP4_PCK_MASK[3]]; bit_offset=CCP4_PCK_BLOCK_HEADER_LENGTH+bit_offset-8; t_=t2; }else{ num_error=CCP4_PCK_ERR_COUNT[(t_>>bit_offset) & CCP4_PCK_MASK[3]]; num_bits=CCP4_PCK_BIT_COUNT[(t_>>(3+bit_offset)) & CCP4_PCK_MASK[3]]; bit_offset+=CCP4_PCK_BLOCK_HEADER_LENGTH; } } else { // reading the data in the block while(num_error>0){ err_val=0; read_bits=0; while(read_bits=8) { // read to next full byte boundary and convert _conv= (t_>>bit_offset) & CCP4_PCK_MASK[8-bit_offset]; err_val|= (unsigned int) _conv << read_bits; read_bits+=(8-bit_offset); // have read to byte boundary - set offset to 0 and read next byte bit_offset=0; t_=(unsigned char) fgetc(instream); } else { // must stop before next byte boundary - also this means that these are the last bits in the error _conv= (t_ >>bit_offset) & CCP4_PCK_MASK[num_bits-read_bits]; err_val|= _conv<dim1){ // the current pixel is not in the first row - averaging is possible // n.b. 
the averaging calculation is performed in the 2's complement domain x4=(int16_t) int_arr[i-1]; x3=(int16_t) int_arr[i-dim1+1]; x2=(int16_t) int_arr[i-dim1]; x1=(int16_t) int_arr[i-dim1-1]; int_arr[i]=(uint16_t) (err_val + (x4 + x3 + x2 + x1 +2) /4 ); i=i; } else if (i!=0){ // current pixel is in the 1st row but is not first pixel int_arr[i]=(uint16_t) (err_val + int_arr[i-1]); } else { int_arr[i]=(uint16_t) err_val; } i++; num_error--; } }// else } return (void *) unpacked_array; } void * ccp4_unpack_string( void *unpacked_array, void *packed, size_t dim1,size_t dim2, size_t max_num_int ){ uint8_t t_,t2,_conv; int err_val,bit_offset,num_error=0,num_bits=0,read_bits; int i; int x4,x3,x2,x1; unsigned int *int_arr; char *instream = (char *)packed; // if no maximum integers are give read the whole nine yards if (max_num_int==0){ max_num_int=dim1*dim2; } // if a NULL pointer is passed allocate some new memory if (unpacked_array==NULL){ if ( (unpacked_array=malloc(sizeof(unsigned int)*max_num_int))==NULL){ errno=ENOMEM; return NULL; } } int_arr = (unsigned int *) unpacked_array; // packed bits always start at byte boundary after header bit_offset=0; // read the first byte of the current_block t_=(unsigned char)*instream; instream++; // printf("%02X \n",t_); // while less than num ints have been unpacked i=0; while(i=(8-CCP4_PCK_BLOCK_HEADER_LENGTH)){ // we'll be reading past the next byte boundary t2=(unsigned char ) *instream; instream++; t_=(t_>>bit_offset) + ((unsigned char)t2 <<(8-bit_offset) ); num_error=CCP4_PCK_ERR_COUNT[t_ & CCP4_PCK_MASK[3]]; num_bits=CCP4_PCK_BIT_COUNT[(t_>>3) & CCP4_PCK_MASK[3]]; bit_offset=CCP4_PCK_BLOCK_HEADER_LENGTH+bit_offset-8; t_=t2; }else{ num_error=CCP4_PCK_ERR_COUNT[(t_>>bit_offset) & CCP4_PCK_MASK[3]]; num_bits=CCP4_PCK_BIT_COUNT[(t_>>(3+bit_offset)) & CCP4_PCK_MASK[3]]; bit_offset+=CCP4_PCK_BLOCK_HEADER_LENGTH; } } else { // reading the data in the block while(num_error>0){ err_val=0; read_bits=0; while(read_bits=8) { // read to 
next full byte boundary and convert _conv= (t_>>bit_offset) & CCP4_PCK_MASK[8-bit_offset]; err_val|= (unsigned int) _conv << read_bits; read_bits+=(8-bit_offset); // have read to byte boundary - set offset to 0 and read next byte bit_offset=0; t_=(unsigned char) *instream; instream++; } else { // must stop before next byte boundary - also this means that these are the last bits in the error _conv= (t_ >>bit_offset) & CCP4_PCK_MASK[num_bits-read_bits]; err_val|= _conv<dim1){ // the current pixel is not in the first row - averaging is possible // n.b. the averaging calculation is performed in the 2's complement domain x4=(int16_t) int_arr[i-1]; x3=(int16_t) int_arr[i-dim1+1]; x2=(int16_t) int_arr[i-dim1]; x1=(int16_t) int_arr[i-dim1-1]; int_arr[i]=(uint16_t) (err_val + (x4 + x3 + x2 + x1 +2) /4 ); i=i; } else if (i!=0){ // current pixel is in the 1st row but is not first pixel int_arr[i]=(uint16_t) (err_val + int_arr[i-1]); } else { int_arr[i]=(uint16_t) err_val; } i++; num_error--; } } //else } return (void *) unpacked_array; } void * ccp4_unpack_v2( void *unpacked_array, void *packed, size_t dim1,size_t dim2, size_t max_num_int){ uint8_t t_,t2,_conv; int err_val,bit_offset,num_error=0,num_bits=0,read_bits; int i; unsigned int x4=0,x3=0,x2=0,x1=0; unsigned int *int_arr=(unsigned int *) unpacked_array; FILE *instream=(FILE *)packed; // if no maximum integers are give read the whole nine yards if (max_num_int==0){ max_num_int=dim1*dim2; } // if a NULL pointer is passed allocate some new memory if (unpacked_array==NULL){ if ( (unpacked_array=malloc(sizeof(unsigned int)*max_num_int))==NULL){ errno=ENOMEM; return NULL; } } // packed bits always start at byte boundary after header bit_offset=0; // read the first byte of the current_block t_=(unsigned char)fgetc(instream); // while less than num ints have been unpacked i=0; while(i=(8-CCP4_PCK_BLOCK_HEADER_LENGTH_V2)){ // we'll be reading past the next byte boundary t2=(unsigned char ) fgetc(instream); t_=(t_>>bit_offset) 
+ ((unsigned char)t2 <<(8-bit_offset) ); num_error=CCP4_PCK_ERR_COUNT_V2[t_ & CCP4_PCK_MASK[4]]; num_bits=CCP4_PCK_BIT_COUNT_V2[(t_>>4) & CCP4_PCK_MASK[4]]; bit_offset=CCP4_PCK_BLOCK_HEADER_LENGTH_V2+bit_offset-8; t_=t2; }else{ num_error=CCP4_PCK_ERR_COUNT_V2[ (t_>>bit_offset) & CCP4_PCK_MASK[4] ]; num_bits=CCP4_PCK_BIT_COUNT_V2[ (t_>>(4+bit_offset)) & CCP4_PCK_MASK[4] ]; bit_offset+=CCP4_PCK_BLOCK_HEADER_LENGTH_V2; } } else { // reading the data in the block while(num_error>0){ err_val=0; read_bits=0; while(read_bits=8) { // read to next full byte boundary and convert _conv= (t_>>bit_offset) & CCP4_PCK_MASK[8-bit_offset]; err_val|= (unsigned int) _conv << read_bits; read_bits+=(8-bit_offset); // have read to byte boundary - set offset to 0 and read next byte bit_offset=0; t_=(unsigned char) fgetc(instream); } else { // must stop before next byte boundary - also this means that these are the last bits in the error _conv= (t_ >>bit_offset) & CCP4_PCK_MASK[num_bits-read_bits]; err_val|= _conv<dim1){ // the current pixel is not in the first row - averaging is possible // n.b. 
the averaging calculation is performed in the 2's complement domain x4=(int16_t) int_arr[i-1]; x3=(int16_t) int_arr[i-dim1+1]; x2=(int16_t) int_arr[i-dim1]; x1=(int16_t) int_arr[i-dim1-1]; int_arr[i]=(uint16_t) (err_val + (x4 + x3 + x2 + x1 +2) /4 ); i=i; } else if (i!=0){ // current pixel is in the 1st row but is not first pixel int_arr[i]=(uint16_t) (err_val + int_arr[i-1]); } else { int_arr[i]=(uint16_t) err_val; } i++; num_error--; } } // else } return (void *) unpacked_array; } void * ccp4_unpack_v2_string( void *unpacked_array, void *packed, size_t dim1,size_t dim2, size_t max_num_int){ uint8_t t_,t2,_conv; int err_val,bit_offset,num_error=0,num_bits=0,read_bits; int i; unsigned int x4=0,x3=0,x2=0,x1=0; unsigned int *int_arr=(unsigned int *) unpacked_array; char *instream=(char *)packed; // if no maximum integers are give read the whole nine yards if (max_num_int==0){ max_num_int=dim1*dim2; } // if a NULL pointer is passed allocate some new memory if (unpacked_array==NULL){ if ( (unpacked_array=malloc(sizeof(unsigned int)*max_num_int))==NULL){ errno=ENOMEM; return NULL; } } // packed bits always start at byte boundary after header bit_offset=0; // read the first byte of the current_block t_=(unsigned char)*instream; instream++; // while less than num ints have been unpacked i=0; while(i=(8-CCP4_PCK_BLOCK_HEADER_LENGTH_V2)){ // we'll be reading past the next byte boundary t2=(unsigned char ) *instream; instream++; t_=(t_>>bit_offset) + ((unsigned char)t2 <<(8-bit_offset) ); num_error=CCP4_PCK_ERR_COUNT_V2[t_ & CCP4_PCK_MASK[4]]; num_bits=CCP4_PCK_BIT_COUNT_V2[(t_>>4) & CCP4_PCK_MASK[4]]; bit_offset=CCP4_PCK_BLOCK_HEADER_LENGTH_V2+bit_offset-8; t_=t2; }else{ num_error=CCP4_PCK_ERR_COUNT_V2[ (t_>>bit_offset) & CCP4_PCK_MASK[4] ]; num_bits=CCP4_PCK_BIT_COUNT_V2[ (t_>>(4+bit_offset)) & CCP4_PCK_MASK[4] ]; bit_offset+=CCP4_PCK_BLOCK_HEADER_LENGTH_V2; } } else { // reading the data in the block while(num_error>0){ err_val=0; read_bits=0; while(read_bits=8) { // read 
to next full byte boundary and convert _conv= (t_>>bit_offset) & CCP4_PCK_MASK[8-bit_offset]; err_val|= (unsigned int) _conv << read_bits; read_bits+=(8-bit_offset); // have read to byte boundary - set offset to 0 and read next byte bit_offset=0; t_=(unsigned char) *instream; instream++; } else { // must stop before next byte boundary - also this means that these are the last bits in the error _conv= (t_ >>bit_offset) & CCP4_PCK_MASK[num_bits-read_bits]; err_val|= _conv<dim1){ // the current pixel is not in the first row - averaging is possible // n.b. the averaging calculation is performed in the 2's complement domain x4=(int16_t) int_arr[i-1]; x3=(int16_t) int_arr[i-dim1+1]; x2=(int16_t) int_arr[i-dim1]; x1=(int16_t) int_arr[i-dim1-1]; int_arr[i]=(uint16_t) (err_val + (x4 + x3 + x2 + x1 +2) /4 ); i=i; } else if (i!=0){ // current pixel is in the 1st row but is not first pixel int_arr[i]=(uint16_t) (err_val + int_arr[i-1]); } else { int_arr[i]=(uint16_t) err_val; } i++; num_error--; } } // else } return (void *) unpacked_array; } // ############################################################################# // ################### Everything to write Mar345 ############################## // ############################################################################# // Returns the number of bits neccesary to encode the longword-array 'chunk' // of size 'n' The size in bits of one encoded element can be 0, 4, 5, 6, 7, // 8, 16 or 32. 
int bits( int32_t *chunk, int n){ int size, maxsize, i; for (i = 1, maxsize = abs(chunk[0]); i < n; ++i) maxsize = max(maxsize, abs(chunk[i])); if (maxsize == 0) size = 0; else if (maxsize < 8) size = 4 * n; else if (maxsize < 16) size = 5 * n; else if (maxsize < 32) size = 6 * n; else if (maxsize < 64) size = 7 * n; else if (maxsize < 128) size = 8 * n; else if (maxsize < 32768) size = 16 * n; else size = 32 * n; return(size); } // Calculates the difference of WORD-sized pixels of an image with the // truncated mean value of four of its neighbours. 'x' is the number of fast // coordinates of the image 'img', 'y' is the number of slow coordinates, // 'diffs' will contain the differences, 'done' defines the index of the pixel // where calculating the differences should start. A pointer to the last // difference is returned. Maximally DIFFBUFSIZ differences are returned in // 'diffs'. int *diff_words( short int *word, int x, int y, int *diffs, int done){ int i = 0; int tot = x * y; if (done == 0) { *diffs = word[0]; ++diffs; ++done; ++i;} while ((done <= x) && (i < DIFFBUFSIZ)) { *diffs = word[done] - word[done - 1]; ++diffs; ++done; ++i;} while ((done < tot) && (i < DIFFBUFSIZ)) { *diffs = word[done] - (word[done - 1] + word[done - x + 1] + word[done - x] + word[done - x - 1] + 2) / 4; ++diffs; ++done; ++i;} return(--diffs); } // Pack 'n' WORDS, starting with 'lng[0]' into the packed array 'target'. The // elements of such a packed array do not obey BYTE-boundaries, but are put one // behind the other without any spacing. Only the 'bitsiz' number of least // significant bits are used. The starting bit of 'target' is 'bit' (bits range // from 0 to 7). After completion of 'pack_words()', both '**target' and '*bit' // are updated and define the next position in 'target' from which packing // could continue. 
void pack_longs(int32_t *lng, int n, char **target, int *bit, int size)
{
    int32_t mask, window;
    int valids, i, temp;
    /* Work on local copies of the output cursor; written back at the end. */
    int temp_bit = *bit;            /* bit offset (0..7) inside *temp_target */
    char *temp_target = *target;    /* current output byte */

    if (size > 0) {
        /* Keep only the 'size' least significant bits of each value. */
        mask = CCP4_PCK_MASK_32[size];
        for (i = 0; i < n; ++i) {
            window = lng[i] & mask;
            valids = size;          /* bits of 'window' still to be emitted */
            if (temp_bit == 0)
                /* Byte is empty: plain store of the low bits. */
                *temp_target = (char) window;
            else {
                /* Byte partially filled: OR the low bits above the ones
                 * already present (shift_left is the masked-shift macro). */
                temp = shift_left(window, temp_bit);
                *temp_target |= temp;
            }
            /* Drop the bits that fitted into the current byte. */
            window = shift_right(window, 8 - temp_bit);
            valids = valids - (8 - temp_bit);
            if (valids < 0)
                /* The whole value fitted without reaching the byte boundary:
                 * just advance the bit offset within the same byte. */
                temp_bit += size;
            else {
                /* Spill the remaining bits into as many further bytes as
                 * needed, 8 bits at a time. */
                while (valids > 0) {
                    *++temp_target = (char) window;
                    window = shift_right(window, 8);
                    valids -= 8;
                }
                /* valids is now in (-8..0]; 8 + valids is the new offset. */
                temp_bit = 8 + valids;
            }
            if (valids == 0) {
                /* Landed exactly on a byte boundary: start a fresh byte. */
                temp_bit = 0;
                ++temp_target;
            }
        }
        /* Publish the advanced cursor; the new bit offset is equivalently
         * derivable from the total number of bits written. */
        *target = temp_target;
        *bit = (*bit + (size * n)) % 8;
    }
}

// Packs 'nmbr' LONGs starting at 'lng[0]' into a packed array of 'bitsize'
// sized elements. If the internal buffer in which the array is packed is full,
// it is flushed to 'file', making room for more of the packed array. If
// ('lng == NULL'), the buffer is flushed as well.
void pack_chunk(int32_t *lng, int nmbr, int bitsize, FILE *packfile)
{
    /* Maps a per-element bit width (0,4,5,6,7,8,16,32) to its 3-bit code. */
    static int32_t bitsize_encode[33] = {0, 0, 0, 0, 1, 2, 3, 4, 5,
                                         0, 0, 0, 0, 0, 0, 0, 6,
                                         0, 0, 0, 0, 0, 0, 0, 0,
                                         0, 0, 0, 0, 0, 0, 0, 7};
    int32_t descriptor[2], i, j;
    /* Packing state persists across calls: output buffer, write cursor and
     * current bit offset within the byte at the cursor. */
    static char *buffer = NULL;
    static char *buffree = NULL;
    static int bitmark;

    if (buffer == NULL) {
        /* First call (or first call after a flush): allocate the buffer. */
        buffree = buffer = (char *) malloc(PACKBUFSIZ);
        bitmark = 0;
    }
    if (lng != NULL) {
        /* j = floor(log2(nmbr)): the chunk length is encoded as its
         * power-of-two exponent. */
        for (i = nmbr, j = 0; i > 1; i /= 2, ++j);
        descriptor[0] = j;
        descriptor[1] = bitsize_encode[bitsize];
        if ((buffree - buffer) > (PACKBUFSIZ - (130 * 4))) {
            /* Not enough headroom left (130*4 bytes — presumably the
             * worst-case encoded chunk of 128 x 32-bit values plus the
             * descriptor; TODO confirm): flush the complete bytes... */
            fwrite(buffer, sizeof(char), buffree - buffer, packfile);
            /* ...and carry the partially-filled byte at *buffree back to
             * the start of the buffer. 'bitmark' keeps its bit offset. */
            buffer[0] = buffree[0];
            buffree = buffer;
        }
        /* 3-bit descriptor (length exponent + width code), then the data. */
        pack_longs(descriptor, 2, &buffree, &bitmark, 3);
        pack_longs(lng, nmbr, &buffree, &bitmark, bitsize);
    } else {
        /* lng == NULL requests the final flush: write out all complete
         * bytes plus the trailing partial byte, then release the buffer. */
        int len = buffree - buffer;
        if (bitmark != 0)
            len++;
        fwrite(buffer, sizeof(char), len, packfile);
        free((void *) buffer);
        buffer = NULL;
    }
}

// Pack image 'img', containing 'x * y' WORD-sized pixels into 'filename'.
void pack_wordimage_copen(short int *img, int x, int y, FILE *packfile)
{
    int chunksiz, packsiz, nbits, next_nbits, tot_nbits;
    /* NOTE(review): 'buffer' is int32_t but diff_words takes int* — they
     * match on platforms where int is 32 bits; confirm for exotic targets. */
    int32_t buffer[DIFFBUFSIZ];
    int32_t *diffs = buffer;
    int32_t *end = diffs - 1;   /* last valid difference; starts "empty" */
    int32_t done = 0;           /* index of the next pixel to process */

    /* Formatted ASCII header carrying the image dimensions. */
    fprintf(packfile, PACKIDENTIFIER, x, y);
    while (done < (x * y)) {
        /* Fill 'buffer' with up to DIFFBUFSIZ neighbour differences. */
        end = diff_words(img, x, y, buffer, done);
        done += (end - buffer) + 1;
        diffs = buffer;
        while (diffs <= end) {
            /* Greedily grow the chunk (1,2,4,...,128 elements) as long as
             * doubling it is estimated to cost less than emitting two
             * separate chunks (each chunk header costs 6 bits). */
            packsiz = 0;
            chunksiz = 1;
            nbits = bits(diffs, 1);
            while (packsiz == 0) {
                if (end <= (diffs + chunksiz * 2))
                    /* Not enough differences left to double: stop here. */
                    packsiz = chunksiz;
                else {
                    next_nbits = bits(diffs + chunksiz, chunksiz);
                    tot_nbits = 2 * max(nbits, next_nbits);
                    if (tot_nbits >= (nbits + next_nbits + 6))
                        /* Merging would not pay for the saved header. */
                        packsiz = chunksiz;
                    else {
                        nbits = tot_nbits;
                        if (chunksiz == 64)
                            packsiz = 128;
                        else
                            chunksiz *= 2;
                    }
                }
            }
            pack_chunk(diffs, packsiz, nbits / packsiz, packfile);
            diffs += packsiz;
        }
    }
    /* NULL chunk = flush the packer's internal buffer to the file. */
    pack_chunk(NULL, 0, 0, packfile);
}

void pack_wordimage_c( short int *img, int x, int y, char *filename){ FILE *packfile = fopen(filename, "ab"); if (packfile ==
NULL) { fprintf(stderr,"The file %s cannot be created!\n ...giving up...\n", filename); exit(1); } else { pack_wordimage_copen(img, x, y, packfile); fclose(packfile); } } fabio-0.6.0/fabio/ext/setup.py0000644001611600070440000000452213227357030017307 0ustar kiefferscisoft00000000000000# coding: utf-8 # /*########################################################################## # Copyright (C) 2016 European Synchrotron Radiation Facility # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # ############################################################################*/ __authors__ = ["V. 
Valls"] __license__ = "MIT" __date__ = "01/08/2017" import os import numpy from numpy.distutils.misc_util import Configuration def configuration(parent_package='', top_path=None): config = Configuration('ext', parent_package, top_path) config.add_extension( name="cf_io", sources=["cf_io.pyx", os.path.join("src", "columnfile.c")], include_dirs=["include", numpy.get_include()], language='c') config.add_extension( name="byte_offset", sources=["byte_offset.pyx"], include_dirs=[numpy.get_include()], language='c') config.add_extension( name="mar345_IO", sources=["mar345_IO.pyx", os.path.join("src", "ccp4_pack.c")], include_dirs=["include", numpy.get_include()], language='c') config.add_extension( name="_cif", sources=["_cif.pyx"], include_dirs=[numpy.get_include()], language='c') return config if __name__ == "__main__": from numpy.distutils.core import setup setup(configuration=configuration) fabio-0.6.0/fabio/ext/include/0000755001611600070440000000000013227375744017231 5ustar kiefferscisoft00000000000000fabio-0.6.0/fabio/ext/include/ccp4_pack.h0000644001611600070440000001012613227357030021215 0ustar kiefferscisoft00000000000000/* Fabio Mar345 ccp4_pack decompressor Copyright (C) 2007-2009 Henning O. Sorensen & Erik Knudsen This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef CPP4_PACK_H #define CPP4_PACK_H #if !defined(_MSC_VER) || (_MSC_VER >= 1900) #include #else #include "msvc\\stdint.h" #endif #include #include #include #define CCP4_PCK_BLOCK_HEADER_LENGTH 6 #define CCP4_PCK_BLOCK_HEADER_LENGTH_V2 8 #define PACKIDENTIFIER "\nCCP4 packed image, X: %04d, Y: %04d\n" // This string defines the start of a packed image. An image file is scanned // until this string is encountered, the size of the unpacked image is // determined from the values of X and Y (which are written out as formatted // ascii numbers), and the packed image is expected to start immediately after // the null-character ending the string. #define V2IDENTIFIER "\nCCP4 packed image V2, X: %04d, Y: %04d\n" // This string defines the start of a packed image. An image file is scanned // until this string is encountered, the size of the unpacked image is // determined from the values of X and Y (which are written out as formatted // ascii numbers), and the packed image is expected to start immediately after // the null-character ending the string. #define PACKBUFSIZ BUFSIZ // Size of internal buffer in which the packed array is stored during transit // form an unpacked image to a packed image on disk. It is set to the size // used by the buffered io-routines given in , but it could be // anything. #define DIFFBUFSIZ 16384L // Size of the internal buffer in which the differences between neighbouring // pixels are stored prior to compression. The image is therefore compressed // in DIFFBUFSIZ chunks. Decompression does not need to know what DIFFBUFSIZ // was when the image was compressed. By increasing this value, the image // can be compressed into a packed image which is a few bytes smaller. Do // not decrease the value of DIFFBUFSIZ below 128L. 
#define pfail_nonzero(a) if ((a)) return NULL; #define max(x, y) (((x) > (y)) ? (x) : (y)) #define min(x, y) (((x) < (y)) ? (x) : (y)) #define shift_left(x, n) (((x) & CCP4_PCK_MASK_32[32 - (n)]) << (n)) #define shift_right(x, n) (((x) >> (n)) & CCP4_PCK_MASK_32[32 - (n)]) // This macro is included because the C standard does not properly define a // left shift: on some machines the bits which are pushed out at the left are // popped back in at the right. By masking, the macro prevents this behaviour. // If you are sure that your machine does not pops bits back in, you can speed // up the code insignificantly by taking out the masking. // read data from a file void* mar345_read_data(FILE *file, int ocount, int dim1, int dim2); // read data from a stream void* mar345_read_data_string(char *instring, int ocount, int dim1, int dim2); // unpack the given data void* ccp4_unpack(void *unpacked_array, void *packed, size_t dim1, size_t dim2, size_t max_num_int); // unpack the given data void* ccp4_unpack_v2(void *unpacked_array, void *packed, size_t dim1, size_t dim2, size_t max_num_int); // unpack the given data void* ccp4_unpack_string(void *unpacked_array, void *packed, size_t dim1, size_t dim2, size_t max_num_int); // unpack the given data void* ccp4_unpack_v2_string(void *unpacked_array, void *packed, size_t dim1, size_t dim2, size_t max_num_int); void pack_wordimage_c(short int *img, int x, int y, char *filename); #endif // CPP4_PACK_H fabio-0.6.0/fabio/ext/include/columnfile.h0000644001611600070440000000302513227357030021523 0ustar kiefferscisoft00000000000000#ifndef CF_H #define CF_H 1 #include #include #include #define CF_INIT_ROWS 8192 #define CF_INIT_COLS 32 #define CF_HEADER_ITEM 128 #define CF_GZ_COMP 1 #define CF_BIN 2 #define repeat16_inc(name,offset) \ *((name)+(offset)),*((name)+(offset)+1),*((name)+(offset)+2),*((name)+(offset)+3),*((name)+(offset)+4), \ 
*((name)+(offset)+5),*((name)+(offset)+6),*((name)+(offset)+7),*((name)+(offset)+8),*((name)+(offset)+9), \ *((name)+(offset)+10),*((name)+(offset)+11),*((name)+(offset)+12),*((name)+(offset)+13),*((name)+(offset)+14),*((name)+(offset)+15) #define cf_check_realloc(p,i,chunk_size,item_size) \ do {\ if((i)%(chunk_size)==0){\ } while (0); #define cf_sscan_column(source,conversion,dest,prefix) \ do {\ int tmpi=0;\ if ((prefix)!=NULL) sscanf(source,prefix);\ while (sscanf( (source) , (conversion) , ((dest) +tmpi))){\ tmpi++;\ }\ } while (0); #define is_ws(character) \ ( (character==' ') || ((character)=='\t') || ((character)=='\v') || ((character) =='\r') || ((character) =='\n') ) typedef struct cf_data{ int ncols,nrows; unsigned int nralloc; double **data; char **clabels; } cf_data; void * cf_read_ascii(void *fp, void *dest, unsigned int FLAGS); void * cf_read_bin(void *fp, void *dest, unsigned int FLAGS); int cf_write(char *fname, void *cf_handle, unsigned int FLAGS); int cf_write_bin(void *fp, void *cf_handle); int cf_write_ascii(void *fp, void *cf_handle,unsigned int FLAGS); void cf_free( cf_data *cf_handle); #endif fabio-0.6.0/fabio/ext/include/msvc/0000755001611600070440000000000013227375744020201 5ustar kiefferscisoft00000000000000fabio-0.6.0/fabio/ext/include/msvc/stdint.h0000644001611600070440000001746113227357030021654 0ustar kiefferscisoft00000000000000// ISO C9x compliant stdint.h for Microsoft Visual Studio // Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 // // Copyright (c) 2006-2008 Alexander Chemeris // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. 
Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. The name of the author may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO // EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////////// #ifndef _MSC_VER // [ #error "Use this header only with Microsoft Visual C++ compilers!" #endif // _MSC_VER ] #ifndef _MSC_STDINT_H_ // [ #define _MSC_STDINT_H_ #if _MSC_VER > 1000 #pragma once #endif #include // For Visual Studio 6 in C++ mode and for many Visual Studio versions when // compiling for ARM we should wrap include with 'extern "C++" {}' // or compiler give many errors like this: // error C2733: second C linkage of overloaded function 'wmemchr' not allowed #ifdef __cplusplus extern "C" { #endif # include #ifdef __cplusplus } #endif // Define _W64 macros to mark types changing their size, like intptr_t. 
#ifndef _W64 # if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 # define _W64 __w64 # else # define _W64 # endif #endif // 7.18.1 Integer types // 7.18.1.1 Exact-width integer types // Visual Studio 6 and Embedded Visual C++ 4 doesn't // realize that, e.g. char has the same size as __int8 // so we give up on __intX for them. #if (_MSC_VER < 1300) typedef char int8_t; typedef short int16_t; typedef int int32_t; typedef unsigned char uint8_t; typedef unsigned short uint16_t; typedef unsigned int uint32_t; #else typedef __int8 int8_t; typedef __int16 int16_t; typedef __int32 int32_t; typedef unsigned __int8 uint8_t; typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; #endif typedef __int64 int64_t; typedef unsigned __int64 uint64_t; // 7.18.1.2 Minimum-width integer types typedef int8_t int_least8_t; typedef int16_t int_least16_t; typedef int32_t int_least32_t; typedef int64_t int_least64_t; typedef uint8_t uint_least8_t; typedef uint16_t uint_least16_t; typedef uint32_t uint_least32_t; typedef uint64_t uint_least64_t; // 7.18.1.3 Fastest minimum-width integer types typedef int8_t int_fast8_t; typedef int16_t int_fast16_t; typedef int32_t int_fast32_t; typedef int64_t int_fast64_t; typedef uint8_t uint_fast8_t; typedef uint16_t uint_fast16_t; typedef uint32_t uint_fast32_t; typedef uint64_t uint_fast64_t; // 7.18.1.4 Integer types capable of holding object pointers #ifdef _WIN64 // [ typedef __int64 intptr_t; typedef unsigned __int64 uintptr_t; #else // _WIN64 ][ typedef _W64 int intptr_t; typedef _W64 unsigned int uintptr_t; #endif // _WIN64 ] // 7.18.1.5 Greatest-width integer types typedef int64_t intmax_t; typedef uint64_t uintmax_t; // 7.18.2 Limits of specified-width integer types #if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 // 7.18.2.1 Limits of exact-width integer types #define INT8_MIN ((int8_t)_I8_MIN) #define INT8_MAX _I8_MAX #define 
INT16_MIN ((int16_t)_I16_MIN) #define INT16_MAX _I16_MAX #define INT32_MIN ((int32_t)_I32_MIN) #define INT32_MAX _I32_MAX #define INT64_MIN ((int64_t)_I64_MIN) #define INT64_MAX _I64_MAX #define UINT8_MAX _UI8_MAX #define UINT16_MAX _UI16_MAX #define UINT32_MAX _UI32_MAX #define UINT64_MAX _UI64_MAX // 7.18.2.2 Limits of minimum-width integer types #define INT_LEAST8_MIN INT8_MIN #define INT_LEAST8_MAX INT8_MAX #define INT_LEAST16_MIN INT16_MIN #define INT_LEAST16_MAX INT16_MAX #define INT_LEAST32_MIN INT32_MIN #define INT_LEAST32_MAX INT32_MAX #define INT_LEAST64_MIN INT64_MIN #define INT_LEAST64_MAX INT64_MAX #define UINT_LEAST8_MAX UINT8_MAX #define UINT_LEAST16_MAX UINT16_MAX #define UINT_LEAST32_MAX UINT32_MAX #define UINT_LEAST64_MAX UINT64_MAX // 7.18.2.3 Limits of fastest minimum-width integer types #define INT_FAST8_MIN INT8_MIN #define INT_FAST8_MAX INT8_MAX #define INT_FAST16_MIN INT16_MIN #define INT_FAST16_MAX INT16_MAX #define INT_FAST32_MIN INT32_MIN #define INT_FAST32_MAX INT32_MAX #define INT_FAST64_MIN INT64_MIN #define INT_FAST64_MAX INT64_MAX #define UINT_FAST8_MAX UINT8_MAX #define UINT_FAST16_MAX UINT16_MAX #define UINT_FAST32_MAX UINT32_MAX #define UINT_FAST64_MAX UINT64_MAX // 7.18.2.4 Limits of integer types capable of holding object pointers #ifdef _WIN64 // [ # define INTPTR_MIN INT64_MIN # define INTPTR_MAX INT64_MAX # define UINTPTR_MAX UINT64_MAX #else // _WIN64 ][ # define INTPTR_MIN INT32_MIN # define INTPTR_MAX INT32_MAX # define UINTPTR_MAX UINT32_MAX #endif // _WIN64 ] // 7.18.2.5 Limits of greatest-width integer types #define INTMAX_MIN INT64_MIN #define INTMAX_MAX INT64_MAX #define UINTMAX_MAX UINT64_MAX // 7.18.3 Limits of other integer types #ifdef _WIN64 // [ # define PTRDIFF_MIN _I64_MIN # define PTRDIFF_MAX _I64_MAX #else // _WIN64 ][ # define PTRDIFF_MIN _I32_MIN # define PTRDIFF_MAX _I32_MAX #endif // _WIN64 ] #define SIG_ATOMIC_MIN INT_MIN #define SIG_ATOMIC_MAX INT_MAX #ifndef SIZE_MAX // [ # ifdef _WIN64 // [ # define 
SIZE_MAX _UI64_MAX # else // _WIN64 ][ # define SIZE_MAX _UI32_MAX # endif // _WIN64 ] #endif // SIZE_MAX ] // WCHAR_MIN and WCHAR_MAX are also defined in #ifndef WCHAR_MIN // [ # define WCHAR_MIN 0 #endif // WCHAR_MIN ] #ifndef WCHAR_MAX // [ # define WCHAR_MAX _UI16_MAX #endif // WCHAR_MAX ] #define WINT_MIN 0 #define WINT_MAX _UI16_MAX #endif // __STDC_LIMIT_MACROS ] // 7.18.4 Limits of other integer types #if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 // 7.18.4.1 Macros for minimum-width integer constants #define INT8_C(val) val##i8 #define INT16_C(val) val##i16 #define INT32_C(val) val##i32 #define INT64_C(val) val##i64 #define UINT8_C(val) val##ui8 #define UINT16_C(val) val##ui16 #define UINT32_C(val) val##ui32 #define UINT64_C(val) val##ui64 // 7.18.4.2 Macros for greatest-width integer constants #define INTMAX_C INT64_C #define UINTMAX_C UINT64_C #endif // __STDC_CONSTANT_MACROS ] #endif // _MSC_STDINT_H_ ] fabio-0.6.0/fabio/fit2dmaskimage.py0000644001611600070440000001235513227357030020241 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # """ Author: Andy Hammersley, ESRF Translation into python/fabio: Jon Wright, ESRF. Writer: Jérôme Kieffer """ # Get ready for python3: from __future__ import with_statement, print_function __authors__ = ["Jon Wright", "Jérôme Kieffer"] __contact__ = "Jerome.Kieffer@esrf.fr" __license__ = "GPLv3+" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" __version__ = "06/01/2015" import numpy import sys import struct from .fabioimage import FabioImage if sys.version < '3': bytes = str class Fit2dMaskImage(FabioImage): """ Read and try to write Andy Hammersley's mask format """ DESCRIPTION = "Fit2d mask file format" DEFAULT_EXTENSIONS = ["msk"] def _readheader(self, infile): """ Read in a header from an already open file """ # 1024 bytes gives 256x32 bit integers header = infile.read(1024) for i, j in [(b"M", 0), (b"A", 4), (b"S", 8), (b"K", 12)]: if header[j] != i[0]: raise Exception("Not a fit2d mask file") fit2dhdr = numpy.fromstring(header, numpy.int32) # Enforce little endian if not numpy.little_endian: fit2dhdr.byteswap(True) self._dim1 = fit2dhdr[4] # 1 less than Andy's fortran self._dim2 = fit2dhdr[5] def read(self, fname, frame=None): """ Read in header into self.header and the data into self.data """ fin = self._open(fname) self._readheader(fin) # Compute image size self.bytecode = numpy.uint8 self.bpp = numpy.dtype(self.bytecode).itemsize # integer division num_ints = (self._dim1 + 31) // 32 total = self._dim2 * num_ints * 4 data = fin.read(total) assert len(data) == 
total fin.close() # Now to unpack it data = numpy.fromstring(data, numpy.uint8) if not numpy.little_endian: data.byteswap(True) data = numpy.reshape(data, (self._dim2, num_ints * 4)) result = numpy.zeros((self._dim2, num_ints * 4 * 8), numpy.uint8) # Unpack using bitwise comparisons to 2**n bits = numpy.ones((1), numpy.uint8) for i in range(8): temp = numpy.bitwise_and(bits, data) result[:, i::8] = temp.astype(numpy.uint8) bits = bits * 2 # Extra rows needed for packing odd dimensions spares = num_ints * 4 * 8 - self._dim1 if spares == 0: data = numpy.where(result == 0, 0, 1) else: data = numpy.where(result[:, :-spares] == 0, 0, 1) # Transpose appears to be needed to match edf reader (scary??) # self.data = numpy.transpose(self.data) self.data = numpy.ascontiguousarray(data, dtype=numpy.uint8).reshape(self._dim2, self._dim1) self.pilimage = None return self def write(self, fname): """ Try to write a file """ header = bytearray(b"\x00" * 1024) header[0] = 77 # M header[4] = 65 # A header[8] = 83 # S header[12] = 75 # K header[24] = 1 # 1 header[16:20] = struct.pack("", "").split(".")[-1] return self._classname classname = property(getclassname) def getframe(self, num): """ returns the file numbered 'num' in the series as a fabioimage """ if self.nframes == 1: # single image per file from .openimage import openimage return openimage(fabioutils.jump_filename(self.filename, num)) raise Exception("getframe out of range") def previous(self): """ returns the previous file in the series as a fabioimage """ from .openimage import openimage return openimage(fabioutils.previous_filename(self.filename)) def next(self): """Returns the next file in the series as a fabioimage :raise IOError: When there is no next file in the series. 
""" from .openimage import openimage return openimage( fabioutils.next_filename(self.filename)) def toPIL16(self, filename=None): """ Convert to Python Imaging Library 16 bit greyscale image """ if filename: self.read(filename) if self.pilimage is None: # Create and cache the result self.pilimage = pilutils.create_pil_16(self.data) return self.pilimage def getheader(self): """ returns self.header """ return self.header def getmax(self): """ Find max value in self.data, caching for the future """ if self.maxval is None: if self.data is not None: self.maxval = self.data.max() return self.maxval def getmin(self): """ Find min value in self.data, caching for the future """ if self.minval is None: if self.data is not None: self.minval = self.data.min() return self.minval def make_slice(self, coords): """ Convert a len(4) set of coords into a len(2) tuple (pair) of slice objects the latter are immutable, meaning the roi can be cached """ assert len(coords) == 4 if len(coords) == 4: # fabian edfimage preference if coords[0] > coords[2]: coords[0:3:2] = [coords[2], coords[0]] if coords[1] > coords[3]: coords[1:4:2] = [coords[3], coords[1]] # in fabian: normally coordinates are given as (x,y) whereas # a matrix is given as row,col # also the (for whichever reason) the image is flipped upside # down wrt to the matrix hence these tranformations fixme = (self.dim2 - coords[3] - 1, coords[0], self.dim2 - coords[1] - 1, coords[2]) return (slice(int(fixme[0]), int(fixme[2]) + 1), slice(int(fixme[1]), int(fixme[3]) + 1)) def integrate_area(self, coords): """ Sums up a region of interest if len(coords) == 4 -> convert coords to slices if len(coords) == 2 -> use as slices floor -> ? removed as unused in the function. """ if self.data is None: # This should return NAN, not zero ? 
return 0 if len(coords) == 4: sli = self.make_slice(coords) elif len(coords) == 2 and isinstance(coords[0], slice) and \ isinstance(coords[1], slice): sli = coords if sli == self.slice and self.area_sum is not None: pass elif sli == self.slice and self.roi is not None: self.area_sum = self.roi.sum(dtype=numpy.float) else: self.slice = sli self.roi = self.data[self.slice] self.area_sum = self.roi.sum(dtype=numpy.float) return self.area_sum def getmean(self): """ return the mean """ if self.mean is None: self.mean = self.data.mean(dtype=numpy.double) return self.mean def getstddev(self): """ return the standard deviation """ if self.stddev is None: self.stddev = self.data.std(dtype=numpy.double) return self.stddev def add(self, other): """ Add another Image - warning, does not clip to 16 bit images by default """ if not hasattr(other, 'data'): logger.warning('edfimage.add() called with something that ' 'does not have a data field') assert self.data.shape == other.data.shape, 'incompatible images - Do they have the same size?' self.data = self.data + other.data self.resetvals() def resetvals(self): """ Reset cache - call on changing data """ self.mean = self.stddev = self.maxval = self.minval = None self.roi = self.slice = self.area_sum = None def rebin(self, x_rebin_fact, y_rebin_fact, keep_I=True): """ Rebin the data and adjust dims :param int x_rebin_fact: x binning factor :param int y_rebin_fact: y binning factor :param bool keep_I: shall the signal increase ? 
""" if self.data is None: raise Exception('Please read in the file you wish to rebin first') if (self.dim1 % x_rebin_fact != 0) or (self.dim2 % y_rebin_fact != 0): raise RuntimeError('image size is not divisible by rebin factor - ' 'skipping rebin') else: dataIn = self.data.astype("float64") shapeIn = self.data.shape shapeOut = (shapeIn[0] // y_rebin_fact, shapeIn[1] // x_rebin_fact) binsize = y_rebin_fact * x_rebin_fact if binsize < 50: # method faster for small binning (4x4) out = numpy.zeros(shapeOut, dtype="float64") for j in range(x_rebin_fact): for i in range(y_rebin_fact): out += dataIn[i::y_rebin_fact, j::x_rebin_fact] else: # method faster for large binning (8x8) temp = self.data.astype("float64") temp.shape = (shapeOut[0], y_rebin_fact, shapeOut[1], x_rebin_fact) out = temp.sum(axis=3).sum(axis=1) self.resetvals() if keep_I: self.data = (out / (y_rebin_fact * x_rebin_fact)).astype(self.data.dtype) else: self.data = out.astype(self.data.dtype) self.dim1 = self.dim1 / x_rebin_fact self.dim2 = self.dim2 / y_rebin_fact # update header self.update_header() def write(self, fname): """ To be overwritten - write the file """ module = sys.modules[self.__class__.__module__] raise NotImplementedError("Writing %s format is not implemented" % module.__name__) def save(self, fname): 'wrapper for write' self.write(fname) def readheader(self, filename): """ Call the _readheader function... """ # Override the needs asserting that all headers can be read via python modules save_state = self._need_a_real_file, self._need_a_seek_to_read self._need_a_real_file, self._need_a_seek_to_read = False, False fin = self._open(filename) self._readheader(fin) fin.close() self._need_a_real_file, self._need_a_seek_to_read = save_state def _readheader(self, fik_obj): """ Must be overridden in classes """ raise Exception("Class has not implemented _readheader method yet") def update_header(self, **kwds): """ update the header entries by default pass in a dict of key, values. 
""" self.header.update(kwds) def read(self, filename, frame=None): """ To be overridden - fill in self.header and self.data """ raise Exception("Class has not implemented read method yet") # return self def load(self, *arg, **kwarg): "Wrapper for read" return self.read(*arg, **kwarg) def readROI(self, filename, frame=None, coords=None): """ Method reading Region of Interest. This implementation is the trivial one, just doing read and crop """ self.read(filename, frame) if len(coords) == 4: self.slice = self.make_slice(coords) elif len(coords) == 2 and isinstance(coords[0], slice) and \ isinstance(coords[1], slice): self.slice = coords else: logger.warning('readROI: Unable to understand Region Of Interest: got %s', coords) self.roi = self.data[self.slice] return self.roi def _open(self, fname, mode="rb"): """ Try to handle compressed files, streams, shared memory etc Return an object which can be used for "read" and "write" ... FIXME - what about seek ? """ if hasattr(fname, "read") and hasattr(fname, "write"): # It is already something we can use if "name" in dir(fname): self.filename = fname.name else: self.filename = "stream" try: setattr(fname, "name", self.filename) except AttributeError: # cStringIO logger.warning("Unable to set filename attribute to stream (cStringIO?) of type %s" % type(fname)) return fname fileObject = None self.filename = fname self.filenumber = fabioutils.extract_filenumber(fname) if isinstance(fname, fabioutils.StringTypes): comp_type = os.path.splitext(fname)[-1] if comp_type == ".gz": fileObject = self._compressed_stream(fname, fabioutils.COMPRESSORS['.gz'], fabioutils.GzipFile, mode) elif comp_type == '.bz2': fileObject = self._compressed_stream(fname, fabioutils.COMPRESSORS['.bz2'], fabioutils.BZ2File, mode) # # Here we return the file even though it may be bzipped or gzipped # but named incorrectly... # # FIXME - should we fix that or complain about the daft naming? 
else: fileObject = fabioutils.File(fname, mode) if "name" not in dir(fileObject): fileObject.name = fname self._file = fileObject return fileObject def _compressed_stream(self, fname, system_uncompress, python_uncompress, mode='rb'): """ Try to transparently handle gzip / bzip2 without always getting python performance """ # assert that python modules are always OK based on performance benchmark # Try to fix the way we are using them? fobj = None if self._need_a_real_file and mode[0] == "r": fo = python_uncompress(fname, mode) # problem when not administrator under certain flavors of windows tmpfd, tmpfn = tempfile.mkstemp() os.close(tmpfd) fobj = fabioutils.File(tmpfn, "w+b", temporary=True) fobj.write(fo.read()) fo.close() fobj.seek(0) elif self._need_a_seek_to_read and mode[0] == "r": fo = python_uncompress(fname, mode) fobj = fabioutils.BytesIO(fo.read(), fname, mode) else: fobj = python_uncompress(fname, mode) return fobj def convert(self, dest): """ Convert a fabioimage object into another fabioimage object (with possible conversions) :param dest: destination type "EDF", "edfimage" or the class itself :return: instance of the new class """ other = None if type(dest) in fabioutils.StringTypes: dest = dest.lower() if dest.endswith("image"): dest = dest[:-5] if dest + "image" in self.registry: other = self.factory(dest + "image") else: # load modules which could be suitable: from . 
import fabioformats for class_ in fabioformats.get_classes_from_extension(dest): try: other = class_() except: pass elif isinstance(dest, self.__class__): other = dest.__class__() elif ("__new__" in dir(dest)) and isinstance(dest(), fabioimage): other = dest() else: logger.error("Unrecognized destination format: %s " % dest) return self other.data = converters.convert_data(self.classname, other.classname, self.data) other.header = converters.convert_header(self.classname, other.classname, self.header) return other def __iter__(self): current_image = self while True: yield current_image try: current_image = current_image.next() except IOError: raise StopIteration fabioimage = FabioImage fabio-0.6.0/fabio/datIO.py0000644001611600070440000000614713227357030016354 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. """ Authors: Henning O. Sorensen & Erik Knudsen Center for Fundamental Research: Metal Structures in Four Dimensions Risoe National Laboratory Frederiksborgvej 399 DK-4000 Roskilde email:erik.knudsen@risoe.dk and Jon Wright, ESRF """ # get ready for python3 from __future__ import with_statement, print_function class fabiodata(object): """ A common class for dataIO in fable Contains a 2d numpy array for keeping data, and two lists (clabels and rlabels) containing labels for columns and rows respectively """ def __init__(self, data=None, clabels=None, rlabels=None, fname=None): """ set up initial values """ if isinstance(data, str): raise RuntimeError("fabioimage.__init__ bad argument - " + "data should be numpy array") self.data = data if (self.data): self.dims = self.data.shape self.clabels = clabels self.rlabels = rlabels if (fname): self.read(fname) def read(self, fname=None, frame=None): """ To be overridden by format specific subclasses """ raise Exception("Class has not implemented read method yet") # import stuff from Jon's columnfile things class columnfile(fabiodata): "Concrete fabiodata class" def read(self, fname, frame=None): from .ext import cf_io try: infile = open(fname, 'rb') except: raise Exception("columnfile: file" + str(fname) + "not found.") try: (self.data, self.clabels) = cf_io.read(infile) except: raise Exception("columnfile: read error, file " + str(fname) + " possibly corrupt") self.dims = self.data.shape infile.close() fabio-0.6.0/fabio/cbfimage.py0000644001611600070440000007766013227357030017121 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: FabIO X-ray image reader # # Copyright (C) 2010-2016 European Synchrotron Radiation Facility # Grenoble, France # # 
Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # """ Authors: Jérôme Kieffer, ESRF email:jerome.kieffer@esrf.fr Cif Binary Files images are 2D images written by the Pilatus detector and others. They use a modified (simplified) byte-offset algorithm. 
CIF is a library for manipulating Crystallographic information files and tries to conform to the specification of the IUCR """ # get ready for python3 from __future__ import with_statement, print_function, absolute_import __author__ = "Jérôme Kieffer" __contact__ = "jerome.kieffer@esrf.eu" __license__ = "MIT" __date__ = "11/08/2017" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" __version__ = ["Generated by CIF.py: Jan 2005 - Oct 2015", "Written by Jerome Kieffer: Jerome.Kieffer@esrf.eu", "On-line data analysis / ISDD ", "ESRF Grenoble (France)"] import os import logging import numpy from .fabioimage import FabioImage from .compression import compByteOffset, decByteOffset, md5sum, six from .ext._cif import split_tokens logger = logging.getLogger(__name__) DATA_TYPES = {"signed 8-bit integer" : "int8", "signed 16-bit integer" : "int16", "signed 32-bit integer" : "int32", "signed 64-bit integer" : "int64", "unsigned 8-bit integer" : "uint8", "unsigned 16-bit integer": "uint16", "unsigned 32-bit integer": "uint32", "unsigned 64-bit integer": "uint64" } MINIMUM_KEYS = ["X-Binary-Size-Fastest-Dimension", "X-Binary-Size-Second-Dimension", "X-Binary-Size", "X-Binary-Number-of-Elements", 'X-Binary-Element-Type', 'X-Binary-Number-of-Elements'] class CbfImage(FabioImage): """ Read the Cif Binary File data format """ DESCRIPTION = "Cif Binary Files format (used by the Pilatus detectors and others)" DEFAULT_EXTENSIONS = ["cbf"] STARTER = b"\x0c\x1a\x04\xd5" PADDING = 512 BINARAY_SECTION = b"--CIF-BINARY-FORMAT-SECTION--" CIF_BINARY_BLOCK_KEY = "_array_data.data" def __init__(self, data=None, header=None, fname=None): """ Constructor of the class CIF Binary File reader. 
:param str fname: the name of the file to open """ FabioImage.__init__(self, data, header) self.cif = CIF() self.cbs = None self.start_binary = None if fname is not None: # load the file) self.read(fname) @staticmethod def checkData(data=None): if data is None: return None elif numpy.issubdtype(data.dtype, int): return data else: return data.astype(int) def _readheader(self, inStream): """ Read in a header in some CBF format from a string representing binary stuff :param file inStream: file containing the Cif Binary part. """ self._read_cif_header(inStream) self._read_binary_section_header(inStream) def _read_cif_header(self, inStream): """Read in a ASCII CIF header :param inStream: file containing the Cif Binary part. :type inStream: opened file. """ blocks = [] last = "" header_data = None for i in range(16): # up to 512*16 = 8k headers ablock = inStream.read(self.PADDING) blocks.append(ablock) if last: extra = len(self.BINARAY_SECTION) extblock = last[-extra:] + ablock else: extra = 0 extblock = ablock res = extblock.find(self.BINARAY_SECTION) if res >= 0: start_cbs = i * self.PADDING - extra + res all_blocks = b"".join(blocks) header_data = all_blocks[:start_cbs] + b"CIF Binary Section\n;\n" self.cbs = all_blocks[start_cbs:] break last = ablock else: header_data = b"".join(blocks) + inStream.read() self.cif._parseCIF(header_data) # backport contents of the CIF data to the headers for key, value in self.cif.items(): if key == self.CIF_BINARY_BLOCK_KEY: if self.cbs is None: self.cbs = value else: self.header[key] = (self.cif[key].strip(" \"\n\r\t")) def _read_binary_section_header(self, inStream): """ Read the binary section header """ self.start_binary = self.cbs.find(self.STARTER) while self.start_binary < 0: self.cbs += inStream.read(self.PADDING) self.start_binary = self.cbs.find(self.STARTER) bin_headers = self.cbs[:self.start_binary] lines = bin_headers.split(b"\n") for line in lines[1:]: if len(line) < 10: break try: key, val = line.split(b':', 1) except 
ValueError: key, val = line.split(b'=', 1) key = key.strip().decode("ASCII") self.header[key] = val.strip(b" \"\n\r\t").decode("ASCII") missing = [] for item in MINIMUM_KEYS: if item not in self.header: missing.append(item) if missing: logger.info("Mandatory keys missing in CBF file: " + ", ".join(missing)) # Compute image size try: self.dim1 = int(self.header['X-Binary-Size-Fastest-Dimension']) self.dim2 = int(self.header['X-Binary-Size-Second-Dimension']) except (KeyError, ValueError): raise IOError("CBF file %s is corrupt, no dimensions in it" % inStream.name) try: self.bytecode = DATA_TYPES[self.header['X-Binary-Element-Type']] except KeyError: self.bytecode = "int32" logger.warning("Defaulting type to int32") self.bpp = numpy.dtype(self.bytecode).itemsize def read_raw_data(self, infile): """Read and return the raw data chunk :param infile: opened file are correct position :return: raw compressed stream """ if self.CIF_BINARY_BLOCK_KEY not in self.cif: err = "Not key %s in CIF, no CBF image in %s" % (self.CIF_BINARY_BLOCK_KEY, self.filename) logger.error(err) for kv in self.cif.items(): logger.debug("%s: %s", kv) raise RuntimeError(err) if self.cif[self.CIF_BINARY_BLOCK_KEY] == "CIF Binary Section": self.cbs += infile.read(len(self.STARTER) + int(self.header["X-Binary-Size"]) - len(self.cbs) + self.start_binary) else: if len(self.cif[self.CIF_BINARY_BLOCK_KEY]) > int(self.header["X-Binary-Size"]) + self.start_binary + len(self.STARTER): self.cbs = self.cif[self.CIF_BINARY_BLOCK_KEY][:int(self.header["X-Binary-Size"]) + self.start_binary + len(self.STARTER)] else: self.cbs = self.cif[self.CIF_BINARY_BLOCK_KEY] return self.cbs[self.start_binary + len(self.STARTER):] def read(self, fname, frame=None, check_MD5=True, only_raw=False): """Read in header into self.header and the data into self.data :param str fname: name of the file :return: fabioimage instance """ self.filename = fname self.header = self.check_header() self.resetvals() infile = self._open(fname, 
"rb") self._readheader(infile) logger.debug("CBS type %s len %s" % (type(self.cbs), len(self.cbs))) binary_data = self.read_raw_data(infile) if only_raw: return binary_data if ("Content-MD5" in self.header) and check_MD5: ref = numpy.string_(self.header["Content-MD5"]) obt = md5sum(binary_data) if ref != obt: logger.error("Checksum of binary data mismatch: expected %s, got %s" % (ref, obt)) if self.header["conversions"] == "x-CBF_BYTE_OFFSET": self.data = numpy.ascontiguousarray(self._readbinary_byte_offset(binary_data,), self.bytecode).reshape((self.dim2, self.dim1)) else: raise Exception(IOError, "Compression scheme not yet supported, please contact the author") self.resetvals() # # ensure the PIL image is reset self.pilimage = None return self def _readbinary_byte_offset(self, raw_bytes): """ Read in a binary part of an x-CBF_BYTE_OFFSET compressed image :param str inStream: the binary image (without any CIF decorators) :return: a linear numpy array without shape and dtype set :rtype: numpy array """ myData = decByteOffset(raw_bytes, size=self.dim1 * self.dim2, dtype=self.bytecode) assert len(myData) == self.dim1 * self.dim2 return myData def write(self, fname): """ write the file in CBF format :param str fname: name of the file """ if self.data is not None: self.dim2, self.dim1 = self.data.shape else: raise RuntimeError("CBF image contains no data") binary_blob = compByteOffset(self.data) dtype = "Unknown" for key, value in DATA_TYPES.items(): if value == self.data.dtype: dtype = key binary_block = [b"--CIF-BINARY-FORMAT-SECTION--", b"Content-Type: application/octet-stream;", b' conversions="x-CBF_BYTE_OFFSET"', b'Content-Transfer-Encoding: BINARY', numpy.string_("X-Binary-Size: %d" % (len(binary_blob))), b"X-Binary-ID: 1", numpy.string_('X-Binary-Element-Type: "%s"' % (dtype)), b"X-Binary-Element-Byte-Order: LITTLE_ENDIAN", b"Content-MD5: " + md5sum(binary_blob), numpy.string_("X-Binary-Number-of-Elements: %d" % (self.dim1 * self.dim2)), 
numpy.string_("X-Binary-Size-Fastest-Dimension: %d" % self.dim1), numpy.string_("X-Binary-Size-Second-Dimension: %d" % self.dim2), b"X-Binary-Size-Padding: 1", b"", self.STARTER + binary_blob, b"", b"--CIF-BINARY-FORMAT-SECTION----"] if "_array_data.header_contents" not in self.header: nonCifHeaders = [] else: nonCifHeaders = [i.strip()[2:] for i in self.header["_array_data.header_contents"].split("\n") if i.find("# ") >= 0] for key in self.header: if key.startswith("_"): if key not in self.cif or self.cif[key] != self.header[key]: self.cif[key] = self.header[key] elif key.startswith("X-Binary-"): pass elif key.startswith("Content-"): pass elif key.startswith("conversions"): pass elif key.startswith("filename"): pass elif key in self.header: nonCifHeaders.append("%s %s" % (key, self.header[key])) if len(nonCifHeaders) > 0: self.cif["_array_data.header_contents"] = "\r\n".join(["# %s" % i for i in nonCifHeaders]) self.cbf = b"\r\n".join(binary_block) block = b"\r\n".join([b"", self.CIF_BINARY_BLOCK_KEY.encode("ASCII"), b";", self.cbf, b";"]) self.cif.pop(self.CIF_BINARY_BLOCK_KEY, None) with open(fname, "wb") as out_file: out_file.write(self.cif.tostring(fname, "\r\n").encode("ASCII")) out_file.write(block) ################################################################################ # CIF class ################################################################################ class CIF(dict): """ This is the CIF class, it represents the CIF dictionary; and as a a python dictionary thus inherits from the dict built in class. 
keys are always unicode (str in python3) values are bytes """ EOL = [numpy.string_(i) for i in ("\r", "\n", "\r\n", "\n\r")] BLANK = [numpy.string_(i) for i in (" ", "\t")] + EOL SINGLE_QUOTE = numpy.string_("'") DOUBLE_QUOTE = numpy.string_('"') SEMICOLUMN = numpy.string_(';') START_COMMENT = (SINGLE_QUOTE, DOUBLE_QUOTE) BINARY_MARKER = numpy.string_("--CIF-BINARY-FORMAT-SECTION--") HASH = numpy.string_("#") LOOP = numpy.string_("loop_") UNDERSCORE = ord("_") if six.PY3 else b"_" QUESTIONMARK = ord("?") if six.PY3 else b"?" STOP = numpy.string_("stop_") GLOBAL = numpy.string_("global_") DATA = numpy.string_("data_") SAVE = numpy.string_("save_") def __init__(self, _strFilename=None): """ Constructor of the class. :param _strFilename: the name of the file to open :type _strFilename: filename (str) or file object """ dict.__init__(self) self._ordered = [] if _strFilename is not None: # load the file) self.loadCIF(_strFilename) def __setitem__(self, key, value): if key not in self._ordered: self._ordered.append(key) return dict.__setitem__(self, key, value) def pop(self, key, default=None): if key in self._ordered: self._ordered.remove(key) return dict.pop(self, key, default) def popitem(self, key, default=None): if key in self._ordered: self._ordered.remove(key) return dict.popitem(self, key, None) def loadCIF(self, _strFilename, _bKeepComment=False): """Load the CIF file and populates the CIF dictionary into the object :param str _strFilename: the name of the file to open :return: None """ own_fd = False if isinstance(_strFilename, (six.binary_type, six.text_type)): if os.path.isfile(_strFilename): infile = open(_strFilename, "rb") own_fd = True else: raise RuntimeError("CIF.loadCIF: No such file to open: %s" % _strFilename) elif "read" in dir(_strFilename): infile = _strFilename else: raise RuntimeError("CIF.loadCIF: what is %s type %s" % (_strFilename, type(_strFilename))) if _bKeepComment: self._parseCIF(numpy.string_(infile.read())) else: 
self._parseCIF(CIF._readCIF(infile)) if own_fd: infile.close() readCIF = loadCIF @staticmethod def isAscii(text): """ Check if all characters in a string are ascii, :param str text: input string :return: boolean :rtype: boolean """ try: text.decode("ascii") except UnicodeDecodeError: return False else: return True @classmethod def _readCIF(cls, instream): """ - Check if the filename containing the CIF data exists - read the cif file - removes the comments :param file instream: opened file object containing the CIF data :return: a set of bytes (8-bit string) containing the raw data :rtype: string """ if "read" not in dir(instream): raise RuntimeError("CIF._readCIF(instream): I expected instream to be an opened file,\ here I got %s type %s" % (instream, type(instream))) out_bytes = numpy.string_("") for sLine in instream: nline = numpy.string_(sLine) pos = nline.find(cls.HASH) if pos >= 0: if cls.isAscii(nline): out_bytes += nline[:pos] + numpy.string_(os.linesep) if pos > 80: logger.warning("This line is too long and could cause problems in PreQuest: %s", sLine) else: out_bytes += nline if len(sLine.strip()) > 80: logger.warning("This line is too long and could cause problems in PreQuest: %s", sLine) return out_bytes def _parseCIF(self, bytes_text): """ - Parses the text of a CIF file - Cut it in fields - Find all the loops and process - Find all the keys and values :param bytes_text: the content of the CIF - file :type bytes_text: 8-bit string (str in python2 or bytes in python3) :return: Nothing, the data are incorporated at the CIF object dictionary :rtype: None """ loopidx = [] looplen = [] loop = [] fields = split_tokens(bytes_text) logger.debug("After split got %s fields of len: %s", len(fields), [len(i) for i in fields]) for idx, field in enumerate(fields): if field.lower() == self.LOOP: loopidx.append(idx) if loopidx: for i in loopidx: loopone, length, keys = CIF._analyseOneLoop(fields, i) loop.append([keys, loopone]) looplen.append(length) for i in 
range(len(loopidx) - 1, -1, -1): f1 = fields[:loopidx[i]] + fields[loopidx[i] + looplen[i]:] fields = f1 self[self.LOOP.decode("ASCII")] = loop for i in range(len(fields) - 1): if len(fields[i + 1]) == 0: fields[i + 1] = self.QUESTIONMARK if fields[i][0] == self.UNDERSCORE and fields[i + 1][0] != self.UNDERSCORE: try: data = fields[i + 1].decode("ASCII") except UnicodeError: logger.warning("Unable to decode in ascii: %s" % fields[i + 1]) data = fields[i + 1] self[(fields[i]).decode("ASCII")] = data @classmethod def _splitCIF(cls, bytes_text): """ Separate the text in fields as defined in the CIF :param bytes_text: the content of the CIF - file :type bytes_text: 8-bit string (str in python2 or bytes in python3) :return: list of all the fields of the CIF :rtype: list """ fields = [] while True: if len(bytes_text) == 0: break elif bytes_text[0] == cls.SINGLE_QUOTE: idx = 0 finished = False while not finished: idx += 1 + bytes_text[idx + 1:].find(cls.SINGLE_QUOTE) if idx >= len(bytes_text) - 1: fields.append(bytes_text[1:-1].strip()) bytes_text = numpy.string_("") finished = True break if bytes_text[idx + 1] in cls.BLANK: fields.append(bytes_text[1:idx].strip()) tmp_text = bytes_text[idx + 1:] bytes_text = tmp_text.strip() finished = True elif bytes_text[0] == cls.DOUBLE_QUOTE: idx = 0 finished = False while not finished: idx += 1 + bytes_text[idx + 1:].find(cls.DOUBLE_QUOTE) if idx >= len(bytes_text) - 1: fields.append(bytes_text[1:-1].strip()) bytes_text = numpy.string_("") finished = True break if bytes_text[idx + 1] in cls.BLANK: fields.append(bytes_text[1:idx].strip()) tmp_text = bytes_text[idx + 1:] bytes_text = tmp_text.strip() finished = True elif bytes_text[0] == cls.SEMICOLUMN: if bytes_text[1:].strip().find(cls.BINARY_MARKER) == 0: idx = bytes_text[32:].find(cls.BINARY_MARKER) if idx == -1: idx = 0 else: idx += 32 + len(cls.BINARY_MARKER) else: idx = 0 finished = False while not finished: idx += 1 + bytes_text[idx + 1:].find(cls.SEMICOLUMN) if bytes_text[idx 
- 1] in cls.EOL: fields.append(bytes_text[1:idx - 1].strip()) tmp_text = bytes_text[idx + 1:] bytes_text = tmp_text.strip() finished = True else: res = bytes_text.split(None, 1) if len(res) == 2: first, second = bytes_text.split(None, 1) if cls.isAscii(first): fields.append(first) bytes_text = second.strip() continue start_binary = bytes_text.find(cls.BINARY_MARKER) if start_binary > 0: end_binary = bytes_text[start_binary + 1:].find(cls.BINARY_MARKER) + start_binary + 1 + len(cls.BINARY_MARKER) fields.append(bytes_text[:end_binary]) bytes_text = bytes_text[end_binary:].strip() else: fields.append(bytes_text) bytes_text = numpy.string_("") break return fields @classmethod def _analyseOneLoop(cls, fields, start_idx): """Processes one loop in the data extraction of the CIF file :param list fields: list of all the words contained in the cif file :param int start_idx: the starting index corresponding to the "loop_" key :return: the list of loop dictionaries, the length of the data extracted from the fields and the list of all the keys of the loop. 
:rtype: tuple """ loop = [] keys = [] i = start_idx + 1 finished = False while not finished: if fields[i][0] == cls.UNDERSCORE: keys.append(fields[i]) i += 1 else: finished = True data = [] while True: if i >= len(fields): break elif len(fields[i]) == 0: break elif fields[i][0] == cls.UNDERSCORE: break elif fields[i] in (cls.LOOP, cls.STOP, cls.GLOBAL, cls.DATA, cls.SAVE): break else: data.append(fields[i]) i += 1 k = 0 if len(data) < len(keys): element = {} for j in keys: if k < len(data): element[j] = data[k] else: element[j] = cls.QUESTIONMARK k += 1 loop.append(element) else: for i in range(len(data) / len(keys)): element = {} for j in keys: element[j] = data[k] k += 1 loop.append(element) return loop, 1 + len(keys) + len(data), keys ########################################## # everything needed to write a CIF file # ########################################## def saveCIF(self, _strFilename="test.cif", linesep=os.linesep, binary=False): """Transforms the CIF object in string then write it into the given file :param _strFilename: the of the file to be written :param linesep: line separation used (to force compatibility with windows/unix) :param binary: Shall we write the data as binary (True only for imageCIF/CBF) :return: None """ if binary: mode = "wb" else: mode = "w" try: fFile = open(_strFilename, mode) except IOError: logger.error("Error during the opening of file for write: %s", _strFilename) return fFile.write(self.tostring(_strFilename, linesep)) try: fFile.close() except IOError: logger.error("Error during the closing of file for write: %s", _strFilename) def tostring(self, _strFilename=None, linesep=os.linesep): """ Converts a cif dictionnary to a string according to the CIF syntax. :param str _strFilename: the name of the filename to be appended in the header of the CIF file. :param linesep: default line separation (can be '\\n' or '\\r\\n'). :return: a string that corresponds to the content of the CIF-file. 
""" lstStrCif = ["# " + i for i in __version__] if "_chemical_name_common" in self: t = self["_chemical_name_common"].split()[0] elif _strFilename is not None: t = os.path.splitext(os.path.split(str(_strFilename).strip())[1])[0] else: t = "" lstStrCif.append("data_%s" % (t)) # first of all get all the keys: lKeys = list(self.keys()) lKeys.sort() for key in lKeys[:]: if key in self._ordered: lKeys.remove(key) self._ordered += lKeys for sKey in self._ordered: if sKey == "loop_": continue if sKey not in self: self._ordered.remove(sKey) logger.debug("Skipping key %s from ordered list as no more present in dict") continue sValue = str(self[sKey]) if sValue.find("\n") > -1: # should add value between ;; lLine = [sKey, ";", sValue, ";", ""] elif len(sValue.split()) > 1: # should add value between '' sLine = "%s '%s'" % (sKey, sValue) if len(sLine) > 80: lLine = [str(sKey), sValue] else: lLine = [sLine] else: sLine = "%s %s" % (sKey, sValue) if len(sLine) > 80: lLine = [str(sKey), sValue] else: lLine = [sLine] lstStrCif += lLine if "loop_" in self: for loop in self["loop_"]: lstStrCif.append("loop_ ") lKeys = loop[0] llData = loop[1] lstStrCif += [" %s" % (sKey) for sKey in lKeys] for lData in llData: sLine = " " for key in lKeys: sRawValue = lData[key] if sRawValue.find("\n") > -1: # should add value between ;; lstStrCif += [sLine, ";", str(sRawValue), ";"] sLine = " " else: if len(sRawValue.split()) > 1: # should add value between '' value = "'%s'" % (sRawValue) else: value = str(sRawValue) if len(sLine) + len(value) > 78: lstStrCif += [sLine] sLine = " " + value else: sLine += " " + value lstStrCif.append(sLine) lstStrCif.append("") return linesep.join(lstStrCif) def exists(self, sKey): """ Check if the key exists in the CIF and is non empty. 
:param str sKey: CIF key :param cif: CIF dictionary :return: True if the key exists in the CIF dictionary and is non empty :rtype: boolean """ bExists = False if sKey in self: if len(self[sKey]) >= 1: if self[sKey][0] not in (self.QUESTIONMARK, numpy.string_(".")): bExists = True return bExists def existsInLoop(self, sKey): """ Check if the key exists in the CIF dictionary. :param str sKey: CIF key :param cif: CIF dictionary :return: True if the key exists in the CIF dictionary and is non empty :rtype: boolean """ if not self.exists(self.LOOP): return False bExists = False if not bExists: for i in self[self.LOOP]: for j in i[0]: if j == sKey: bExists = True return bExists def loadCHIPLOT(self, _strFilename): """ Load the powder diffraction CHIPLOT file and returns the pd_CIF dictionary in the object :param str _strFilename: the name of the file to open :return: the CIF object corresponding to the powder diffraction :rtype: dictionary """ if not os.path.isfile(_strFilename): errStr = "I cannot find the file %s" % _strFilename logger.error(errStr) raise IOError(errStr) lInFile = open(_strFilename, "r").readlines() self["_audit_creation_method"] = 'From 2-D detector using FIT2D and CIFfile' self["_pd_meas_scan_method"] = "fixed" self["_pd_spec_description"] = lInFile[0].strip() try: iLenData = int(lInFile[3]) except ValueError: iLenData = None lOneLoop = [] try: f2ThetaMin = float(lInFile[4].split()[0]) last = "" for sLine in lInFile[-20:]: if sLine.strip() != "": last = sLine.strip() f2ThetaMax = float(last.split()[0]) limitsOK = True except (ValueError, IndexError): limitsOK = False f2ThetaMin = 180.0 f2ThetaMax = 0 # print "limitsOK:", limitsOK for sLine in lInFile[4:]: sCleaned = sLine.split("#")[0].strip() data = sCleaned.split() if len(data) == 2: if not limitsOK: f2Theta = float(data[0]) if f2Theta < f2ThetaMin: f2ThetaMin = f2Theta if f2Theta > f2ThetaMax: f2ThetaMax = f2Theta lOneLoop.append({"_pd_meas_intensity_total": data[1]}) if not iLenData: iLenData = 
len(lOneLoop) assert (iLenData == len(lOneLoop)) self["_pd_meas_2theta_range_inc"] = "%.4f" % ((f2ThetaMax - f2ThetaMin) / (iLenData - 1)) if self["_pd_meas_2theta_range_inc"] < 0: self["_pd_meas_2theta_range_inc"] = abs(self["_pd_meas_2theta_range_inc"]) tmp = f2ThetaMax f2ThetaMax = f2ThetaMin f2ThetaMin = tmp self["_pd_meas_2theta_range_max"] = "%.4f" % f2ThetaMax self["_pd_meas_2theta_range_min"] = "%.4f" % f2ThetaMin self["_pd_meas_number_of_points"] = str(iLenData) self[self.LOOP] = [[["_pd_meas_intensity_total"], lOneLoop]] @staticmethod def LoopHasKey(loop, key): "Returns True if the key (string) exist in the array called loop""" try: loop.index(key) return True except ValueError: return False cbfimage = CbfImage fabio-0.6.0/fabio/utils/0000755001611600070440000000000013227375744016146 5ustar kiefferscisoft00000000000000fabio-0.6.0/fabio/utils/__init__.py0000644001611600070440000000000013227357030020231 0ustar kiefferscisoft00000000000000fabio-0.6.0/fabio/utils/pilutils.py0000644001611600070440000000770713227357030020364 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE """Helper functions using Python Imaging Library (PIL) """ __authors__ = ["Jérôme Kieffer", "Jon Wright"] __date__ = "27/07/2017" __license__ = "MIT" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" __status__ = "stable" import logging import numpy logger = logging.getLogger(__name__) try: from PIL import Image except ImportError: Image = None PIL_TO_NUMPY = { "I;8": numpy.uint8, "I;16": numpy.uint16, "I;16B": numpy.uint16, # big endian "I;16L": numpy.uint16, # little endian "I;32": numpy.uint32, "I;32L": numpy.uint32, # little endian "I;32B": numpy.uint32, # big endian "F;32F": numpy.float32, "F;32BF": numpy.float32, # big endian "F;64F": numpy.float64, "F;64BF": numpy.float64, # big endian "F": numpy.float32, "1": numpy.bool, "I": numpy.int32, "L": numpy.uint8, } NUMPY_TO_PIL = { 'float32': "F", 'int32': "F;32NS", 'uint32': "F;32N", 'int16': "F;16NS", 'uint16': "F;16N", 'int8': "F;8S", 'uint8': "F;8" } def get_numpy_array(pil_image): """ Returns a numpy array from a PIL image :param PIL.Image pil_image: A PIL Image object """ dim1, dim2 = pil_image.size if pil_image.mode in PIL_TO_NUMPY: dtype = PIL_TO_NUMPY[pil_image.mode] else: dtype = numpy.float32 pil_image = pil_image.convert("F") try: data = numpy.asarray(pil_image, dtype) except: # PIL does not support buffer interface (yet) if hasattr(pil_image, "tobytes"): data = numpy.fromstring(pil_image.tobytes(), dtype=dtype) else: data = numpy.fromstring(pil_image.tostring(), dtype=dtype) # byteswap ? 
if numpy.dtype(dtype).itemsize > 1: need_swap = False need_swap |= numpy.little_endian and "B" in pil_image.mode need_swap |= not numpy.little_endian and pil_image.mode.endswith("L") if need_swap: data.byteswap(True) data = data.reshape((dim2, dim1)) return data def create_pil_16(numpy_array): """ Convert a numpy array to a Python Imaging Library 16 bit greyscale image. :param numpy.ndarray numpy_array: A numpy array """ if Image is None: raise ImportError("PIL is not installed") size = numpy_array.shape[:2][::-1] if numpy_array.dtype.name in NUMPY_TO_PIL: mode2 = NUMPY_TO_PIL[numpy_array.dtype.name] mode1 = mode2[0] else: raise RuntimeError("Unknown numpy type: %s" % (numpy_array.dtype.type)) dats = numpy_array.tostring() pil_image = Image.frombuffer(mode1, size, dats, "raw", mode2, 0, 1) return pil_image fabio-0.6.0/fabio/utils/setup.py0000644001611600070440000000320313227357030017642 0ustar kiefferscisoft00000000000000# coding: utf-8 # /*########################################################################## # Copyright (C) 2016 European Synchrotron Radiation Facility # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # ############################################################################*/ __authors__ = ["V. Valls"] __license__ = "MIT" __date__ = "31/07/2017" from numpy.distutils.misc_util import Configuration def configuration(parent_package='', top_path=None): config = Configuration('utils', parent_package, top_path) return config if __name__ == "__main__": from numpy.distutils.core import setup setup(configuration=configuration) fabio-0.6.0/fabio/utils/mathutils.py0000644001611600070440000000362613227357030020525 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE """Math function which can be useful on the full project """ import numpy def naive_rad2deg(x): """ Naive implementation of radiuan to degree. Useful for very old numpy (v1.0.1 on MacOSX from Risoe) """ return 180.0 * x / numpy.pi def naive_deg2rad(x): """ Naive implementation of degree to radiuan. Useful for very old numpy (v1.0.1 on MacOSX from Risoe) """ return x * numpy.pi / 180. try: from numpy import rad2deg, deg2rad except ImportError: # naive implementation for very old numpy (v1.0.1 on MacOSX from Risoe) rad2deg = naive_deg2rad deg2rad = naive_deg2rad fabio-0.6.0/fabio/directories.py0000644001611600070440000000431713227357030017665 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION W """FabIO module: Contains the directory with test-images""" __author__ = "Jérôme Kieffer" __contact__ = "Jerome.Kieffer@ESRF.eu" __license__ = "MIT" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" __date__ = "24/07/2017" __status__ = "stable" import os import getpass import tempfile import logging logger = logging.getLogger(__name__) SHARED_TESTIMAGES = "/usr/share/fabio/testimages" # testimages contains the directory name where test images are located testimages = None if "FABIO_TESTIMAGES" in os.environ: testimages = os.environ.get("FABIO_TESTIMAGES") if not os.path.exists(testimages): logger.warning("testimage directory %s does not exist" % testimages) elif os.path.isdir(SHARED_TESTIMAGES): testimages = SHARED_TESTIMAGES else: # create a temporary folder testimages = os.path.join(tempfile.gettempdir(), "fabio_testimages_%s" % (getpass.getuser())) if not os.path.exists(testimages): os.makedirs(testimages) fabio-0.6.0/fabio/nexus.py0000644001611600070440000003310313227357030016506 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: FabIO X-ray image reader # # Copyright (C) 2010-2016 European Synchrotron Radiation Facility # Grenoble, France # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # """Module for handling HDF5 data structure following the NeXuS convention Stand-alone module which tries to offer interface to HDF5 via H5Py """ from __future__ import absolute_import, print_function, division __author__ = "Jerome Kieffer" __contact__ = "Jerome.Kieffer@ESRF.eu" __license__ = "MIT" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" __date__ = "24/07/2017" __status__ = "beta" __docformat__ = 'restructuredtext' import logging import numpy import os import sys import time from .fabioutils import exists from ._version import version if sys.version_info[0] < 3: bytes = str from urlparse import urlparse else: from urllib.parse import urlparse logger = logging.getLogger(__name__) try: import h5py except ImportError as error: h5py = None logger.error("h5py module missing") else: try: h5py._errors.silence_errors() except AttributeError: # old h5py pass def get_isotime(force_time=None): """ :param force_time: enforce a given time (current by default) :type force_time: float :return: the current time as an ISO8601 string :rtype: string """ if force_time is None: force_time = time.time() localtime = time.localtime(force_time) gmtime = time.gmtime(force_time) tz_h = localtime.tm_hour - gmtime.tm_hour tz_m = localtime.tm_min - gmtime.tm_min return "%s%+03i:%02i" % (time.strftime("%Y-%m-%dT%H:%M:%S", localtime), tz_h, tz_m) def from_isotime(text, use_tz=False): """ :param text: string representing the time is iso format :return: Time in second since epoch (float) """ if 
isinstance(text, numpy.ndarray): text = text[0] if (sys.version_info[0] > 2) and isinstance(text, bytes): text = text.decode("utf-8") else: text = str(text) base = text[:19] if use_tz and len(text) == 25: sgn = 1 if text[:19] == "+" else -1 tz = 60 * (60 * int(text[20:22]) + int(text[23:25])) * sgn else: tz = 0 return time.mktime(time.strptime(base, "%Y-%m-%dT%H:%M:%S")) + tz def is_hdf5(filename): """ Check if a file is actually a HDF5 file :param filename: this file has better to exist :return: true or False """ signature = b"\x89\x48\x44\x46\x0d\x0a\x1a\x0a" if not exists(filename): raise IOError("No such file %s" % (filename)) with open(filename.split("::")[0], "rb") as f: sig = f.read(len(signature)) return sig == signature class Nexus(object): """ Writer class to handle Nexus/HDF5 data Manages: entry pyFAI-subentry detector #TODO: make it thread-safe !!! """ def __init__(self, filename, mode="r"): """ Constructor :param filename: name of the hdf5 file containing the nexus :param mode: can be r or a """ self.filename = os.path.abspath(filename) self.mode = mode if not h5py: logger.error("h5py module missing: NeXus not supported") raise RuntimeError("H5py module is missing") if exists(self.filename) and self.mode == "r": self.h5 = h5py.File(self.filename.split("::")[0], mode=self.mode) else: self.h5 = h5py.File(self.filename.split("::")[0]) self.to_close = [] def close(self, endtime=None): """Close the filename and update all entries :param endtime: timestamp in iso-format of the end of the acquisition. 
""" if endtime is None: end_time = get_isotime() elif isinstance(endtime, (int, float)): end_time = get_isotime(endtime) for entry in self.to_close: entry["end_time"] = end_time self.h5.close() # Context manager for "with" statement compatibility def __enter__(self, *arg, **kwarg): return self def __exit__(self, *arg, **kwarg): self.close() def get_entry(self, name): """ Retrieves an entry from its name :param name: name of the entry to retrieve :return: HDF5 group of NXclass == NXentry """ for grp_name in self.h5: if grp_name == name: grp = self.h5[grp_name] if (isinstance(grp, h5py.Group) and "start_time" in grp and "NX_class" in grp.attrs and grp.attrs["NX_class"] == "NXentry"): return grp def get_entries(self): """ retrieves all entry sorted the latest first. :return: list of HDF5 groups """ entries = [(grp, from_isotime(self.h5[grp + "/start_time"].value)) for grp in self.h5 if (isinstance(self.h5[grp], h5py.Group) and "start_time" in self.h5[grp] and "NX_class" in self.h5[grp].attrs and self.h5[grp].attrs["NX_class"] == "NXentry")] if entries: entries.sort(key=lambda a: a[1], reverse=True) # sort entries in decreasing time return [self.h5[i[0]] for i in entries] else: # no entries found, try without sorting by time entries = [grp for grp in self.h5 if (isinstance(self.h5[grp], h5py.Group) and "NX_class" in self.h5[grp].attrs and self.h5[grp].attrs["NX_class"] == "NXentry")] entries.sort(reverse=True) return [self.h5[i] for i in entries] def find_detector(self, all=False): """ Tries to find a detector within a NeXus file, takes the first compatible detector :param all: return all detectors found as a list """ result = [] for entry in self.get_entries(): for instrument in self.get_class(entry, "NXsubentry") + self.get_class(entry, "NXinstrument"): for detector in self.get_class(instrument, "NXdetector"): if all: result.append(detector) else: return detector return result def find_data(self, all=False): """ Tries to find a NXdata within a NeXus file :param all: 
return all detectors found as a list """ result = [] for entry in self.get_entries(): data = self.get_data(entry) if data: if all: result += data else: return data[0] for instrument in self.get_class(entry, "NXinstrument"): data = self.get_data(instrument) if data: if all: result += data else: return data[0] for detector in self.get_class(instrument, "NXdetector"): data = self.get_data(detector) if data: if all: result += data else: return data[0] for instrument in self.get_class(entry, "NXsubentry"): data = self.get_data(instrument) if data: if all: result += data else: return data[0] for detector in self.get_class(instrument, "NXdetector"): data = self.get_data(detector) if data: if all: result += data else: return data[0] return result def new_entry(self, entry="entry", program_name="pyFAI", title="description of experiment", force_time=None, force_name=False): """ Create a new entry :param entry: name of the entry :param program_name: value of the field as string :param title: value of the field as string :param force_time: seconds since epoch enforce the start_time :param force_name: set to true to prevent the addition of a _0001 suffix :return: the corresponding HDF5 group """ if force_name: entry_grp = self.h5.require_group(entry) else: nb_entries = len(self.get_entries()) entry_grp = self.h5.require_group("%s_%04i" % (entry, nb_entries)) entry_grp.attrs["NX_class"] = "NXentry" entry_grp["title"] = numpy.string_(title) entry_grp["program_name"] = numpy.string_(program_name) entry_grp["start_time"] = numpy.string_(get_isotime(force_time)) self.to_close.append(entry_grp) return entry_grp def new_instrument(self, entry="entry", instrument_name="id00",): """ Create an instrument in an entry or create both the entry and the instrument if """ if not isinstance(entry, h5py.Group): entry = self.new_entry(entry) return self.new_class(entry, instrument_name, "NXinstrument") # howto external link # myfile['ext link'] = h5py.ExternalLink("otherfile.hdf5", 
"/path/to/resource") def new_class(self, grp, name, class_type="NXcollection"): """ create a new sub-group with type class_type :param grp: parent group :param name: name of the sub-group :param class_type: NeXus class name :return: subgroup created """ sub = grp.require_group(name) sub.attrs["NX_class"] = class_type return sub def new_detector(self, name="detector", entry="entry", subentry="pyFAI"): """ Create a new entry/pyFAI/Detector :param detector: name of the detector :param entry: name of the entry :param subentry: all pyFAI description of detectors should be in a pyFAI sub-entry """ entry_grp = self.new_entry(entry) pyFAI_grp = self.new_class(entry_grp, subentry, "NXsubentry") pyFAI_grp["definition_local"] = numpy.string_("pyFAI") pyFAI_grp["definition_local"].attrs["version"] = version det_grp = self.new_class(pyFAI_grp, name, "NXdetector") return det_grp def get_class(self, grp, class_type="NXcollection"): """ return all sub-groups of the given type within a group :param grp: HDF5 group :param class_type: name of the NeXus class """ coll = [grp[name] for name in grp if (isinstance(grp[name], h5py.Group) and "NX_class" in grp[name].attrs and grp[name].attrs["NX_class"] == class_type)] return coll def get_data(self, grp, class_type="NXdata"): """ return all dataset of the the NeXus class NXdata :param grp: HDF5 group :param class_type: name of the NeXus class """ result = [] for grp in self.get_class(grp, class_type): result += [grp[name] for name in grp if (isinstance(grp[name], h5py.Dataset) and ("signal" in grp[name].attrs))] return result def deep_copy(self, name, obj, where="/", toplevel=None, excluded=None, overwrite=False): """ perform a deep copy: create a "name" entry in self containing a copy of the object :param where: path to the toplevel object (i.e. 
root) :param toplevel: firectly the top level Group :param excluded: list of keys to be excluded :param overwrite: replace content if already existing """ if (excluded is not None) and (name in excluded): return if not toplevel: toplevel = self.h5[where] if isinstance(obj, h5py.Group): if name not in toplevel: grp = toplevel.require_group(name) for k, v in obj.attrs.items(): grp.attrs[k] = v elif isinstance(obj, h5py.Dataset): if name in toplevel: if overwrite: del toplevel[name] logger.warning("Overwriting %s in %s" % (toplevel[name].name, self.filename)) else: logger.warning("Not overwriting %s in %s" % (toplevel[name].name, self.filename)) return toplevel[name] = obj.value for k, v in obj.attrs.items(): toplevel[name].attrs[k] = v fabio-0.6.0/fabio/fabioformats.py0000644001611600070440000001301113227357030020014 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE """ Provide an API to all the supported formats """ __author__ = "Valentin Valls" __contact__ = "valentin.valls@esrf.eu" __license__ = "MIT" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" __date__ = "27/07/2017" __status__ = "stable" __docformat__ = 'restructuredtext' import logging _logger = logging.getLogger(__name__) from . import fabioimage # Note: The order of the import is important for the import sequence from . import edfimage # noqa from . import adscimage # noqa from . import tifimage # noqa from . import marccdimage # noqa from . import mar345image # noqa from . import fit2dmaskimage # noqa from . import brukerimage # noqa from . import bruker100image # noqa from . import pnmimage # noqa from . import GEimage # noqa from . import OXDimage # noqa from . import dm3image # noqa from . import HiPiCimage # noqa from . import pilatusimage # noqa from . import fit2dspreadsheetimage # noqa from . import kcdimage # noqa from . import cbfimage # noqa from . import xsdimage # noqa from . import binaryimage # noqa from . import pixiimage # noqa from . import raxisimage # noqa from . import numpyimage # noqa from . import eigerimage # noqa from . import hdf5image # noqa from . import fit2dimage # noqa from . import speimage # noqa from . import jpegimage # noqa from . import jpeg2kimage # noqa from . import mpaimage # noqa def get_all_classes(): """Returns the list of supported codec identified by there fabio classes. 
:rtype: list""" return fabioimage.FabioImage.registry.values() def get_classes(reader=None, writer=None): """ Return available codecs according to filter :param bool reader: True to reach codecs providing reader or False to provide codecs which do not provided reader. If None, reader feature is not filtered :param bool writer: True to reach codecs providing writer or False to provide codecs which do not provided writer. If None, writer feature is not filtered :rtype: list """ formats = [] for f in get_all_classes(): # assert that if the read is redefined, then there is a reader has_reader = f.read.__module__ != fabioimage.__name__ # assert that if the write is redefined, then there is a writer has_writer = f.write.__module__ != fabioimage.__name__ include_format = True if reader is not None and reader != has_reader: include_format = False if writer is not None and writer != has_writer: include_format = False if include_format: formats.append(f) return formats def get_class_by_name(format_name): """ Get a format class by its name. :param str format_name: Format name, for example, "edfimage" :return: instance of the new class """ if format_name in fabioimage.FabioImage.registry: return fabioimage.FabioImage.registry[format_name] else: return None _extension_cache = None """Cache extension mapping""" def _get_extension_mapping(): """Returns a dictionary mapping file extension to the list of supported formats. 
The result is cached, do not edit it :rtype: dict """ global _extension_cache if _extension_cache is None: _extension_cache = {} for codec in get_all_classes(): for ext in codec.DEFAULT_EXTENSIONS: if ext not in _extension_cache: _extension_cache[ext] = [] _extension_cache[ext].append(codec) return _extension_cache def get_classes_from_extension(extension): """ Returns list of supported file format classes from a file extension :param str extension: File extension, for example "edf" :return: fabio image class """ mapping = _get_extension_mapping() extension = extension.lower() if extension in mapping: # clone the list return list(mapping[extension]) else: return [] def is_extension_supported(extension): """ Returns true is the extension is supported. :param str format_name: Format name, for example, "edfimage" :return: instance of the new class """ mapping = _get_extension_mapping() extension = extension.lower() return extension in mapping fabio-0.6.0/fabio/pilatusimage.py0000644001611600070440000000666013227357030020040 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
class PilatusImage(TifImage):
    """Read the Pilatus file format: a TIFF image carrying detector
    metadata as '#'-prefixed comment lines embedded in the first 4 kB
    of the file.

    Pixel data decoding is entirely inherited from :class:`TifImage`;
    this class only adds header extraction.
    """

    DESCRIPTION = "Pilatus file format based on Tiff"

    DEFAULT_EXTENSIONS = ["tif", "tiff"]

    def _readheader(self, infile):
        """Parse the Pilatus comment block into ``self.header``.

        Parser based approach: gets all entries found in the comment
        block. Keys and values are stored as ``bytes``.

        :param infile: open binary file positioned at the start
        :return: the header dict (also stored as ``self.header``)
        """
        self.header = self.check_header()

        # Only the first 4096 bytes are scanned for the comment block
        hstr = infile.read(4096)
        # well not very pretty - but seems to find start of
        # header information
        if (hstr.find(b'# ') == -1):
            # no comment block at all: return the default header
            return self.header

        # Slice out everything from the first '# ' up to the first NUL.
        # NOTE(review): raises ValueError if no b'\x00' occurs within the
        # first 4096 bytes -- confirm all producers pad with NULs.
        hstr = hstr[hstr.index(b'# '):]
        hstr = hstr[:hstr.index(b'\x00')]
        hstr = hstr.split(b'#')
        # drop all empty fragments produced by the split
        go_on = True
        while go_on:
            try:
                hstr.remove(b'')
            except Exception:
                go_on = False

        for line in hstr:
            # strip the leading space and everything after the CRLF
            line = line[1:line.index(b'\r\n')]
            # try the separators in priority order: ':', '=', ' ', ','
            # NOTE(review): split() without maxsplit means a value
            # containing the separator again (e.g. a hh:mm:ss time) keeps
            # only its first field -- confirm this is acceptable.
            if line.find(b':') > -1:
                dump = line.split(b':')
                self.header[dump[0]] = dump[1]
            elif line.find(b'=') > -1:
                dump = line.split(b'=')
                self.header[dump[0]] = dump[1]
            elif line.find(b' ') > -1:
                i = line.find(b' ')
                self.header[line[:i]] = line[i:]
            elif line.find(b',') > -1:
                dump = line.split(b',')
                self.header[dump[0]] = dump[1]

        return self.header

    def _read(self, fname):
        """Read the image; inherited from tifimage.

        ... a Pilatus image *is a* tif image, just with a header.

        :param fname: file name to read
        :return: self
        """
        return TifImage.read(self, fname)


pilatusimage = PilatusImage
class HipicImage(FabioImage):
    """ Read HiPic images e.g. collected with a Hamamatsu CCD camera

    The format is a small fixed binary header (little-endian uint16
    fields), an ASCII comment block, then raw uint16 pixel data.
    """

    DESCRIPTION = "HiPic file format from Hamamatsu CCD cameras"

    DEFAULT_EXTENSIONS = ["img"]

    def _readheader(self, infile):
        """ Read in a header from an already open file

        Fills ``self.header`` with the geometry fields and the key=value
        pairs found in the comment block.

        :param infile: open binary file positioned at the file start
        """
        # Fixed binary header: 2-byte magic, then uint16 fields
        Image_tag = infile.read(2)
        # NOTE(review): Comment_len is a 1-element array (no [0] like the
        # fields below) and is passed as-is to infile.read() -- confirm
        # this coercion is intended.
        Comment_len = numpy.fromstring(infile.read(2), numpy.uint16)
        Dim_1 = numpy.fromstring(infile.read(2), numpy.uint16)[0]
        Dim_2 = numpy.fromstring(infile.read(2), numpy.uint16)[0]
        Dim_1_offset = numpy.fromstring(infile.read(2), numpy.uint16)[0]
        Dim_2_offset = numpy.fromstring(infile.read(2), numpy.uint16)[0]
        _HeaderType = numpy.fromstring(infile.read(2), numpy.uint16)[0]
        _Dump = infile.read(50)  # reserved/unused header bytes
        Comment = infile.read(Comment_len)
        self.header['Image_tag'] = Image_tag
        self.header['Dim_1'] = Dim_1
        self.header['Dim_2'] = Dim_2
        self.header['Dim_1_offset'] = Dim_1_offset
        self.header['Dim_2_offset'] = Dim_2_offset
        # self.header['Comment'] = Comment
        # NOTE(review): Image_tag is bytes; on Python 3 comparing it with
        # the str 'IM' is always unequal, so this warning would fire for
        # every file -- confirm against b'IM'.
        if Image_tag != 'IM':
            # This does not look like an HiPic file
            logger.warning("No opening. Corrupt header of HiPic file %s",
                           str(infile.name))
        # Parse the comment block: CRLF-separated groups of
        # comma-separated key=value pairs.
        # NOTE(review): Comment is bytes but is searched/split with str
        # arguments -- raises TypeError on Python 3; confirm.
        Comment_split = Comment[:Comment.find('\x00')].split('\r\n')
        for topcomment in Comment_split:
            topsplit = topcomment.split(',')
            for line in topsplit:
                if '=' in line:
                    key, val = line.split('=', 1)
                    # Users cannot type in significant whitespace
                    key = key.rstrip().lstrip()
                    self.header_keys.append(key)
                    # NOTE(review): the second assignment overwrites the
                    # whitespace-stripped value with one that only has
                    # quotes stripped -- confirm which was intended.
                    self.header[key] = val.lstrip().rstrip()
                    self.header[key] = val.lstrip('"').rstrip('"')

    def read(self, fname, frame=None):
        """
        Read in header into self.header and
            the data   into self.data

        :param fname: file name to read
        :param frame: ignored, HiPic is single-frame
        :return: self
        :raises IOError: corrupted header or truncated data block
        """
        self.header = self.check_header()
        self.resetvals()
        infile = self._open(fname, "rb")
        self._readheader(infile)
        # Compute image size
        try:
            self.dim1 = int(self.header['Dim_1'])
            self.dim2 = int(self.header['Dim_2'])
        except (ValueError, KeyError):
            raise IOError("HiPic file %s is corrupted, cannot read it" % str(fname))
        bytecode = numpy.uint16
        self.bpp = len(numpy.array(0, bytecode).tostring())

        # Read image data
        block = infile.read(self.dim1 * self.dim2 * self.bpp)
        infile.close()

        # now read the data into the array
        try:
            self.data = numpy.reshape(
                numpy.fromstring(block, bytecode),
                [self.dim2, self.dim1])
        except:
            logger.debug("%s %s %s %s %s", len(block), bytecode, self.bpp, self.dim2, self.dim1)
            raise IOError('Size spec in HiPic-header does not match size of image data field')
        self.bytecode = self.data.dtype.type

        # Sometimes these files are not saved as 12 bit,
        # But as 16 bit after bg subtraction - which results
        # negative values saved as 16bit. Therefore values higher
        # 4095 is really negative values
        if self.data.max() > 4095:
            gt12bit = self.data > 4095
            self.data = self.data - gt12bit * (2 ** 16 - 1)

        # ensure the PIL image is reset
        self.pilimage = None
        return self


HiPiCimage = HipicImage
class MpaImage(FabioImage):
    """
    FabIO image class for images from multiwire data files (mpa).

    Supports both the ascii ("asc") and the binary flavour of the
    format, selected through the ``mpafmt`` header key.
    """

    DESCRIPTION = "multiwire data files"

    DEFAULT_EXTENSIONS = ["mpa"]

    def _readheader(self, infile):
        """
        Read and decode the header of an image

        :param infile: Opened python file (can be stringIO or bzipped file)
        """
        # list of header key to keep the order (when writing)
        header_prefix = ''
        tmp_hdr = OrderedDict([("None", OrderedDict())])

        while True:
            line = infile.readline()
            line = line.decode()
            if line.find('=') > -1:
                # "key=value" entry, filed under the current section
                key, value = line.strip().split('=', 1)
                key = key.strip()
                value = value.strip()
                if header_prefix == '':
                    tmp_hdr["None"][key] = value
                else:
                    tmp_hdr[header_prefix][key] = value
            elif line.startswith('[DATA') or line.startswith('[CDAT'):
                # data marker: the header is finished
                break
            else:
                # a "[SECTION]" line opens a new header section
                header_prefix = line.strip().strip('[]')
                tmp_hdr[header_prefix] = {}

        # Flatten the per-section dictionaries into self.header,
        # prefixing each key with its section name.
        self.header = OrderedDict()
        for key, key_data in tmp_hdr.items():
            key = str(key)
            for subkey, subkey_data in key_data.items():
                subkey = str(subkey)
                if key == 'None':
                    self.header[subkey] = subkey_data
                else:
                    self.header[key + '_' + subkey] = subkey_data

    def read(self, fname, frame=None):
        """
        Try to read image

        :param fname: name of the file
        :return: self, with ``self.data`` filled in
        :raises IOError: when mandatory header keys are missing
        """
        infile = self._open(fname, 'r')
        self._readheader(infile)

        if ('ADC1_range' not in self.header.keys() or
                'ADC2_range' not in self.header.keys() or
                'mpafmt' not in self.header.keys()):
            logger.error('Error in opening %s: badly formatted mpa header.',
                         fname)
            raise IOError('Error in opening %s: badly formatted mpa header.'
                          % fname)

        self.dim1 = int(self.header['ADC1_range'])
        self.dim2 = int(self.header['ADC2_range'])

        # pos indexes the '[CDAT...' marker line: pixel values start at
        # lines[pos + 1].
        # BUGFIX: pos used to be assigned only inside the binary branch,
        # so the ascii branch crashed with a NameError below.
        pos = -1
        if self.header['mpafmt'] == 'asc':
            # _readheader already consumed everything up to the data
            # marker, so every remaining line is data: keep pos = -1.
            lines = infile.readlines()
        else:
            # binary flavour: reopen in binary mode and locate the
            # '[CDAT' marker explicitly
            infile.close()
            infile = self._open(fname, 'rb')
            lines = infile.readlines()
            for i, line in enumerate(lines):
                if line.startswith(b'[CDAT'):
                    pos = i
                    break

        img = numpy.array(lines[pos + 1:], dtype=float)
        self.data = img.reshape((self.dim1, self.dim2))
        return self


mpaimage = MpaImage
# Recognised netpbm magic numbers and the header keys expected for the
# classic (P1-P6) and the pam (P7) flavours; all stored as bytes.
SUBFORMATS = [six.b(i) for i in ('P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7')]

HEADERITEMS = [six.b(i) for i in ('SUBFORMAT', 'WIDTH', 'HEIGHT', 'MAXVAL')]
P7HEADERITEMS = [six.b(i) for i in ('WIDTH', 'HEIGHT', 'DEPTH', 'MAXVAL', 'TUPLTYPE', 'ENDHDR')]


class PnmImage(FabioImage):
    """Read and write netpbm images (PBM/PGM/PPM/PAM).

    Only the binary greyscale flavour (P5/pgm) is fully supported for
    both reading and writing; the other subformats raise
    NotImplementedError or are only partially handled.
    """

    DESCRIPTION = "PNM file format"

    DEFAULT_EXTENSIONS = ["pnm", "pgm", "pbm"]

    def __init__(self, *arg, **kwargs):
        FabioImage.__init__(self, *arg, **kwargs)
        # default subformat used when writing (note: str key, while the
        # keys parsed from a file are stored as bytes)
        self.header['Subformat'] = 'P5'

    def _readheader(self, f):
        """Parse the pnm header from an open binary file.

        pnm images have a 3-line header but ignore lines starting with '#'
        1st line contains the pnm image sub format
        2nd line contains the image pixel dimension
        3rd line contains the maximum pixel value (at least for grayscale - check this)

        Sets ``self.dim1``/``self.dim2``/``self.bytecode`` as side effects.

        :param f: open file positioned at the start of the stream
        :raises IOError: unknown subformat, bad header item or pixel depth
        """
        line = f.readline().strip()
        if line not in SUBFORMATS:
            raise IOError('unknown subformat of pnm: %s' % line)
        else:
            self.header[six.b('SUBFORMAT')] = line

        # NOTE(review): the stored value is bytes, so this comparison with
        # the str 'P7' can never be true on Python 3 -- the P7/pam branch
        # looks unreachable; confirm intent.
        if self.header[six.b('SUBFORMAT')] == 'P7':
            # this one has a special header
            while six.b('ENDHDR') not in line:
                line = f.readline()
                while(line[0] == '#'):
                    line = f.readline()
                # NOTE(review): neither str nor bytes has an `lsplit`
                # method -- this raises AttributeError; presumably
                # `split` was intended.
                s = line.lsplit(' ', 1)
                if s[0] not in P7HEADERITEMS:
                    raise IOError('Illegal pam (netpnm p7) headeritem %s' % s[0])
                self.header[s[0]] = s[1]
        else:
            # classic flavour: accumulate whitespace-separated tokens
            # until SUBFORMAT, WIDTH, HEIGHT and MAXVAL are all present
            values = list(line.split())
            while len(values) < len(HEADERITEMS):
                line = f.readline()
                while line[0] == '#':
                    line = f.readline()
                values += line.split()
            for k, v in zip(HEADERITEMS, values):
                self.header[k] = v.strip()

        # set the dimensions
        self.dim1 = int(self.header[six.b("WIDTH")])
        self.dim2 = int(self.header[six.b("HEIGHT")])
        # figure out how many bytes are used to store the data
        # case construct here!
        m = int(self.header[six.b('MAXVAL')])
        if m < 256:
            self.bytecode = numpy.uint8
        elif m < 65536:
            self.bytecode = numpy.uint16
        elif m < 2147483648:
            self.bytecode = numpy.uint32
            logger.warning('32-bit pixels are not really supported by the netpgm standard')
        else:
            raise IOError('could not figure out what kind of pixels you have')

    def read(self, fname, frame=None):
        """
        try to read PNM images

        :param fname: name of the file
        :param frame: not relevant here! PNM is always single framed
        :return: self
        :raises IOError: when no decoder exists for the subformat
        """
        self.header = self.check_header()
        self.resetvals()
        infile = self._open(fname)
        self._readheader(infile)

        # Dispatch to the decoder method named "<subformat>dec",
        # e.g. P5dec for a raw pgm body.
        if six.PY3:
            fmt = str(self.header[six.b('SUBFORMAT')], encoding="latin-1")
        else:
            fmt = self.header[six.b('SUBFORMAT')]
        decoder_name = "%sdec" % fmt
        if decoder_name in dir(PnmImage):
            decoder = getattr(PnmImage, decoder_name)
            self.data = decoder(self, infile, self.bytecode)
        else:
            raise IOError("No decoder named %s for file %s" % (decoder_name, fname))
        self.resetvals()
        return self

    def write(self, fname):
        """try to write image. For now, limited to the P5 (raw pgm,
        big-endian greyscale) subformat.

        :param fname: name of the file
        """
        self.header[six.b("SUBFORMAT")] = "P5"
        self.header[six.b("WIDTH")] = self.dim1
        self.header[six.b("HEIGHT")] = self.dim2
        self.header[six.b("MAXVAL")] = self.data.max()
        header = six.b(" ".join([str(self.header[key]) for key in HEADERITEMS[1:]]))
        with open(fname, "wb") as fobj:
            fobj.write(six.b("P5 \n"))
            fobj.write(header)
            fobj.write(six.b(" \n"))
            if numpy.little_endian:
                # pgm pixel data is big-endian on disk
                fobj.write(self.data.byteswap().tostring())
            else:
                fobj.write(self.data.tostring())

    def P1dec(self, buf, bytecode):
        """Decode a plain-text bitmap (P1) body, one image row per line.

        NOTE(review): `i` is never incremented, so every parsed line is
        written into row 0 -- looks like a missing `i += 1`; confirm.
        """
        data = numpy.zeros((self.dim2, self.dim1))
        i = 0
        for l in buf:
            try:
                data[i, :] = numpy.array(l.split()).astype(bytecode)
            except ValueError:
                raise IOError('Size spec in pnm-header does not match size of image data field')
        return data

    def P4dec(self, buf, bytecode):
        """Packed bitmap (pbm) decoding -- not implemented."""
        err = 'single bit (pbm) images are not supported - yet'
        logger.error(err)
        raise NotImplementedError(err)

    def P2dec(self, buf, bytecode):
        """Decode a plain-text greymap (P2) body, one image row per line.

        NOTE(review): same missing row increment as P1dec; confirm.
        """
        data = numpy.zeros((self.dim2, self.dim1))
        i = 0
        for l in buf:
            try:
                data[i, :] = numpy.array(l.split()).astype(bytecode)
            except ValueError:
                raise IOError('Size spec in pnm-header does not match size of image data field')
        return data

    def P5dec(self, buf, bytecode):
        """Decode a raw greymap (P5/pgm) body: big-endian binary pixels."""
        data = buf.read()
        try:
            data = numpy.fromstring(data, bytecode)
        except ValueError:
            raise IOError('Size spec in pnm-header does not match size of image data field')
        data.shape = self.dim2, self.dim1
        if numpy.little_endian:
            # file is big-endian: swap in place on little-endian hosts
            data.byteswap(True)
        return data

    def P3dec(self, buf, bytecode):
        """Plain-text RGB (ppm) decoding -- not implemented."""
        err = '(plain-ppm) RGB images are not supported - yet'
        logger.error(err)
        raise NotImplementedError(err)

    def P6dec(self, buf, bytecode):
        """Raw RGB (ppm) decoding -- not implemented."""
        err = '(ppm) RGB images are not supported - yet'
        logger.error(err)
        raise NotImplementedError(err)

    def P7dec(self, buf, bytecode):
        """pam (P7) decoding -- not implemented."""
        err = '(pam) images are not supported - yet'
        logger.error(err)
        raise NotImplementedError(err)

    @staticmethod
    def check_data(data=None):
        """Clip data to the writable unsigned range and pick the
        narrowest dtype (uint8 or uint16) that holds it."""
        if data is None:
            return None
        else:
            data = data.clip(0, 65535)
            if data.max() < 256:
                return data.astype(numpy.uint8)
            else:
                return data.astype(numpy.uint16)


pnmimage = PnmImage
class BrukerImage(FabioImage):
    """
    Read and eventually write ID11 bruker (eg smart6500) images

    The file is a sequence of 512-byte header blocks ("KEY    :value"
    records, 80 characters each) followed by raw little-endian pixel
    data and an ASCII overflow table for pixels exceeding the per-pixel
    byte depth.

    TODO: int32 -> float32 conversion according to the "linear" keyword.
    This is done and works but we need to check with other programs that
    we are applying the right formula and not the reciprocal one.
    """

    DESCRIPTION = "File format used by Bruker detectors (version 86)"

    # There is no extension. It is used as frame counter
    DEFAULT_EXTENSIONS = []

    # mapping byte-per-pixel -> numpy dtype of the raw data block
    bpp_to_numpy = {1: numpy.uint8,
                    2: numpy.uint16,
                    4: numpy.uint32}

    # needed if you feel like writing - see ImageD11/scripts/edf2bruker.py
    SPACER = "\x1a\x04"  # this is CTRL-Z CTRL-D

    # Ordered list of the recognised header keys (Bruker frame format,
    # versions 86/100); see the Bruker frame format documentation for the
    # meaning of every key. Order matters when writing headers back.
    HEADERS_KEYS = [
        # identification / provenance
        "FORMAT", "VERSION", "HDRBLKS", "TYPE", "SITE", "MODEL", "USER",
        "SAMPLE", "SETNAME", "RUN", "SAMPNUM", "TITLE",
        # counting statistics and timing
        "NCOUNTS", "NOVERFL", "MINIMUM", "MAXIMUM", "NONTIME", "NLATE",
        "FILENAM", "CREATED", "CUMULAT", "ELAPSDR", "ELAPSDA",
        # scan description
        "OSCILLA", "NSTEPS", "RANGE", "START", "INCREME", "NUMBER",
        "NFRAMES", "ANGLES",
        # image geometry and encoding
        "NOVER64", "NPIXELB", "NROWS", "NCOLS", "WORDORD", "LONGORD",
        # source and optics
        "TARGET", "SOURCEK", "SOURCEM", "FILTER",
        # crystallography
        "CELL", "MATRIX", "LOWTEMP", "TEMP", "HITEMP", "ZOOM", "CENTER",
        "DISTANC", "TRAILER", "COMPRES", "LINEAR", "PHD", "PREAMP",
        "CORRECT", "WARPFIL", "WAVELEN", "MAXXY", "AXIS", "ENDING",
        "DETPAR", "LUT", "DISPLIM", "PROGRAM", "ROTATE", "BITMASK",
        "OCTMASK", "ESDCELL", "DETTYPE", "NEXP", "CCDPARM", "BIS",
        # CIFTAB strings and corrections
        "CHEM", "MORPH", "CCOLOR", "CSIZE", "DNSMET", "DARK", "AUTORNG",
        "ZEROADJ", "XTRANS", "HKL&XY", "AXES2", "ENDING2", "FILTER2",
        "LEPTOS", "CFR"]

    version = 86

    def __init__(self, data=None, header=None):
        FabioImage.__init__(self, data, header)
        # cached byte-per-pixel used when writing (see calc_bpp)
        self.__bpp_file = None
        # raw text of the header blocks as read from disk
        self.__headerstring__ = ""

    def _readheader(self, infile):
        """
        The bruker format uses 80 char lines in key : value format
        In the first 512*5 bytes of the header there should be a
        HDRBLKS key, whose value denotes how many 512 byte blocks are
        in the total header. The header is always n*5*512 bytes,
        otherwise it wont contain whole key: value pairs
        """
        line = 80
        blocksize = 512
        nhdrblks = 5  # by default we always read 5 blocks of 512
        self.__headerstring__ = infile.read(blocksize * nhdrblks).decode("ASCII")
        self.header = self.check_header()
        for i in range(0, nhdrblks * blocksize, line):
            if self.__headerstring__[i: i + line].find(":") > 0:
                key, val = self.__headerstring__[i: i + line].split(":", 1)
                key = key.strip()  # remove the whitespace (why?)
                val = val.strip()
                if key in self.header:
                    # append lines if key already there
                    self.header[key] = self.header[key] + os.linesep + val
                else:
                    self.header[key] = val
        # we must have read this in the first 5*512 bytes.
        nhdrblks = int(self.header.get('HDRBLKS', 5))
        self.header['HDRBLKS'] = nhdrblks
        # Now read in the rest of the header blocks, appending
        self.__headerstring__ += infile.read(blocksize * (nhdrblks - 5)).decode("ASCII")
        for i in range(5 * blocksize, nhdrblks * blocksize, line):
            if self.__headerstring__[i: i + line].find(":") > 0:
                # as for the first 5*512 bytes of header
                key, val = self.__headerstring__[i: i + line].split(":", 1)
                key = key.strip()
                val = val.strip()
                if key in self.header:
                    self.header[key] = self.header[key] + os.linesep + val
                else:
                    self.header[key] = val
        # make a (new) header item called "datastart"
        self.header['datastart'] = blocksize * nhdrblks
        # set the image dimensions
        self.dim1 = int(self.header['NROWS'].split()[0])
        self.dim2 = int(self.header['NCOLS'].split()[0])
        self.version = int(self.header.get('VERSION', "86"))

    def read(self, fname, frame=None):
        """
        Read in and unpack the pixels (including overflow table)

        :param fname: file name to read
        :param frame: ignored, single-frame format
        :return: self
        :raises RuntimeError: unparsable header or NPIXELB entry
        """
        with self._open(fname, "rb") as infile:
            try:
                self._readheader(infile)
            except Exception as err:
                raise RuntimeError("Unable to parse Bruker headers: %s" % err)
            rows = self.dim1
            cols = self.dim2
            try:
                # you had to read the Bruker docs to know this!
                npixelb = int(self.header['NPIXELB'])
            except Exception:
                errmsg = "length " + str(len(self.header['NPIXELB'])) + "\n"
                for byt in self.header['NPIXELB']:
                    errmsg += "char: " + str(byt) + " " + str(ord(byt)) + "\n"
                logger.warning(errmsg)
                raise RuntimeError(errmsg)
            data = numpy.fromstring(infile.read(rows * cols * npixelb),
                                    dtype=self.bpp_to_numpy[npixelb])
            # data on disk is little endian
            if not numpy.little_endian and data.dtype.itemsize > 1:
                data.byteswap(True)
            # handle overflows
            nov = int(self.header['NOVERFL'])
            if nov > 0:
                # Read in the overflows
                # need at least int32 sized data I guess - can reach 2^21
                data = data.astype(numpy.uint32)
                # 16 character overflows:
                #   9 characters of intensity
                #   7 character position
                for _ in range(nov):
                    ovfl = infile.read(16)
                    intensity = int(ovfl[0: 9])
                    position = int(ovfl[9: 16])
                    data[position] = intensity
        # Handle Float images ...
        if "LINEAR" in self.header:
            try:
                slope, offset = self.header["LINEAR"].split(None, 1)
                slope = float(slope)
                offset = float(offset)
            except Exception:
                logger.warning("Error in converting to float data with linear parameter: %s" % self.header["LINEAR"])
                slope = 1
                offset = 0
            if (slope != 1) or (offset != 0):
                # TODO: check that the formula is OK, not reverted.
                logger.warning("performing correction with slope=%s, offset=%s (LINEAR=%s)" % (slope, offset, self.header["LINEAR"]))
                data = (data * slope + offset).astype(numpy.float32)
        self.data = data.reshape(self.dim1, self.dim2)
        self.resetvals()
        self.pilimage = None
        return self

    def write(self, fname):
        """
        Write a bruker image

        Float data is rescaled to uint32 using the LINEAR slope/offset,
        pixels above the chosen byte depth are clamped and recorded in
        the overflow table.
        """
        if numpy.issubdtype(self.data.dtype, float):
            if "LINEAR" in self.header:
                try:
                    slope, offset = self.header["LINEAR"].split(None, 1)
                    slope = float(slope)
                    offset = float(offset)
                except Exception:
                    logger.warning("Error in converting to float data with linear parameter: %s" % self.header["LINEAR"])
                    slope, offset = 1.0, 0.0
            else:
                offset = self.data.min()
                max_data = self.data.max()
                max_range = 2 ** 24 - 1  # similar to the mantissa of a float32
                if max_data > offset:
                    slope = (max_data - offset) / float(max_range)
                else:
                    slope = 1.0
            tmp_data = numpy.round(((self.data - offset) / slope)).astype(numpy.uint32)
            self.header["LINEAR"] = "%s %s" % (slope, offset)
        else:
            tmp_data = self.data

        bpp = self.calc_bpp(tmp_data)
        self.basic_translate(fname)
        limit = 2 ** (8 * bpp) - 1
        data = tmp_data.astype(self.bpp_to_numpy[bpp])
        # clamp overflowing pixels; their true value goes in the
        # overflow table written by gen_overflow()
        reset = numpy.where(tmp_data >= limit)
        data[reset] = limit
        if not numpy.little_endian and bpp > 1:
            # Bruker enforces little endian
            data.byteswap(True)
        with self._open(fname, "wb") as bruker:
            bruker.write(self.gen_header().encode("ASCII"))
            bruker.write(data.tostring())
            bruker.write(self.gen_overflow().encode("ASCII"))

    def calc_bpp(self, data=None, max_entry=4096):
        """
        Calculate the number of bytes per pixel to get an optimal
        overflow table.

        :param data: array to analyse, defaults to self.data
        :param max_entry: maximum acceptable number of overflow entries
        :return: byte per pixel (1, 2 or 4)
        """
        if data is None:
            data = self.data
        if self.__bpp_file is None:
            for i in [1, 2]:
                overflown = (data >= (2 ** (8 * i) - 1))
                if overflown.sum() < max_entry:
                    self.__bpp_file = i
                    break
            else:
                self.__bpp_file = 4
        return self.__bpp_file

    def gen_header(self):
        """
        Generate headers (with some magic and guesses)

        :return: padded header string, a multiple of 512 bytes long
        """
        headers = []
        for key in self.HEADERS_KEYS:
            if key in self.header:
                value = self.header[key]
                line = key.ljust(7) + ":"
                if type(value) in StringTypes:
                    if os.linesep in value:
                        # multi-line value: one 80-char record per line
                        lines = value.split(os.linesep)
                        for i in lines[:-1]:
                            headers.append((line + str(i)).ljust(80, " "))
                            line = key.ljust(7) + ":"
                        line += str(lines[-1])
                    elif len(value) < 72:
                        line += str(value)
                    else:
                        # long value: wrap every 72 characters
                        for i in range(len(value) // 72):
                            headers.append((line + str(value[72 * i:72 * (i + 1)])))
                            line = key.ljust(7) + ":"
                        line += value[72 * (i + 1):]
                elif "__len__" in dir(value):
                    # sequence value (e.g. CELL, ANGLES): give each element
                    # an equal share of the 72 usable characters.
                    # BUGFIX: was `"\%.%is" % 72 // len(value) - 1`, which
                    # raises TypeError (str // int); parenthesize the
                    # arithmetic and escape the percent sign with %%.
                    f = "%%.%is" % (72 // len(value) - 1)
                    line += " ".join([f % i for i in value])
                else:
                    line += str(value)
                headers.append(line.ljust(80, " "))
        header = "".join(headers)
        if len(header) > 512 * self.header["HDRBLKS"]:
            # header outgrew the reserved blocks: round HDRBLKS up to a
            # multiple of 5 and rewrite the HDRBLKS record accordingly
            tmp = ceil(len(header) / 512.0)
            self.header["HDRBLKS"] = int(ceil(tmp / 5.0) * 5.0)
            for i in range(len(headers)):
                if headers[i].startswith("HDRBLKS"):
                    # BUGFIX: was `headers[i] = headers.append(...)`, which
                    # stored None in the list and appended a duplicate
                    # record; rewrite the record in place instead.
                    headers[i] = ("HDRBLKS:%s" % self.header["HDRBLKS"]).ljust(80, " ")
        res = pad("".join(headers), self.SPACER + "." * 78,
                  512 * int(self.header["HDRBLKS"]))
        return res

    def gen_overflow(self):
        """
        Generate an overflow table: 16-char records (9 chars intensity,
        7 chars position), padded to a multiple of 512 bytes.
        """
        limit = 2 ** (8 * self.calc_bpp()) - 1
        flat = self.data.ravel()  # flat memory view
        overflow_pos = numpy.where(flat >= limit)[0]  # list of indexes
        overflow_val = flat[overflow_pos]
        overflow = "".join(["%09i%07i" % (val, pos)
                            for pos, val in zip(overflow_pos, overflow_val)])
        return pad(overflow, ".", 512)

    def basic_translate(self, fname=None):
        """
        Does some basic population of the headers so that the writing
        is possible
        """
        if "FORMAT" not in self.header:
            self.header["FORMAT"] = "86"
        if "HDRBLKS" not in self.header:
            self.header["HDRBLKS"] = 5
        if "TYPE" not in self.header:
            self.header["TYPE"] = "UNWARPED"
        if "USER" not in self.header:
            self.header["USER"] = getpass.getuser()
        if "FILENAM" not in self.header:
            self.header["FILENAM"] = "%s" % fname
        if "CREATED" not in self.header:
            self.header["CREATED"] = time.ctime()
        if "NOVERFL" not in self.header:
            self.header["NOVERFL"] = "0"
        # always refreshed from the current data, never trusted from the
        # incoming header:
        self.header["NPIXELB"] = self.calc_bpp()
        self.header["NROWS"] = self.data.shape[0]
        self.header["NCOLS"] = self.data.shape[1]
        if "WORDORD" not in self.header:
            self.header["WORDORD"] = "0"
        if "LONGORD" not in self.header:
            self.header["LONGORD"] = "0"


brukerimage = BrukerImage
# (tail of the fabio/setup.py MIT license header; full text in the
#  repository `copyright` file)

__authors__ = ["V. Valls"]
__license__ = "MIT"
__date__ = "31/07/2017"

from numpy.distutils.misc_util import Configuration


def configuration(parent_package='', top_path=None):
    """Return the numpy.distutils configuration of the ``fabio`` package,
    declaring all of its sub-packages."""
    config = Configuration('fabio', parent_package, top_path)
    for subpackage in ('app', 'benchmark', 'ext', 'test',
                       'third_party', 'utils'):
        config.add_subpackage(subpackage)
    return config


if __name__ == "__main__":
    from numpy.distutils.core import setup
    setup(configuration=configuration)
# ---------------------------------------------------------------------------
# fabio/third_party/gzip.py  -- coding: utf-8; MIT license, ESRF 2015-2016
#
# (tail of the MIT license header; full text in the repository `copyright`
#  file)
"""Wrapper module for the `gzip` library.

Feed this module using a local copy of `gzip` if it exists. Else it expect
to have an available `gzip` library installed in the Python path.

It should be used like that:

.. code-block::

    from fabio.third_party import gzip
"""

from __future__ import absolute_import

__authors__ = ["Valentin Valls"]
__license__ = "MIT"
__date__ = "28/07/2017"

import sys as __sys

if __sys.version_info < (2, 7):
    # On very old interpreters, use the bundled local copy
    from ._local.gzip import *  # noqa
else:
    # `from gzip import *` exposes nothing useful (the module defines no
    # __all__ and hides its names), so re-export the public names by hand.
    import gzip as __gzip
    for __name, __value in __gzip.__dict__.items():
        if not __name.startswith("_"):
            globals()[__name] = __value
# ---------------------------------------------------------------------------
# fabio/third_party/__init__.py: "# Place holder"
# fabio/third_party/argparse.py  -- coding: utf-8; MIT license, ESRF 2015-2016
#
# (tail of the MIT license header; full text in the repository `copyright`
#  file)
"""Wrapper module for the `argparse` library.

Feed this module using a local copy of `argparse` if it exists. Else it
expect to have an available `argparse` library installed in the Python
path.

It should be used like that:

.. code-block::

    from fabio.third_party import argparse
"""

from __future__ import absolute_import

__authors__ = ["Valentin Valls"]
__license__ = "MIT"
__date__ = "28/07/2017"

try:
    # try to import our local version of argparse
    from ._local.argparse import *  # noqa
except ImportError:
    # else try to import it from the python path
    from argparse import *  # noqa
# ---------------------------------------------------------------------------
# fabio/third_party/six.py  -- coding: utf-8; MIT license, ESRF 2015-2016
#
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # ###########################################################################*/ """Wrapper module for the `six` library. Feed this module using a local silx copy of `six` if it exists. Else it expect to have an available `six` library installed in the Python path. It should be used like that: .. code-block:: from fabio.third_party import six """ from __future__ import absolute_import __authors__ = ["Valentin Valls"] __license__ = "MIT" __date__ = "28/07/2017" try: # try to import our local version of six from ._local.six import * # noqa except ImportError: # else try to import it from the python path import six if tuple(int(i) for i in six.__version__.split(".")[:2]) < (1, 8): raise ImportError("Six version is too old") from six import * # noqa fabio-0.6.0/fabio/third_party/setup.py0000644001611600070440000000366713227357030021051 0ustar kiefferscisoft00000000000000# coding: ascii # # JK: Numpy.distutils which imports this does not handle utf-8 in version<1.12 # # /*########################################################################## # # Copyright (c) 2016 European Synchrotron Radiation Facility # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished 
# (tail of the MIT license header; full text in the repository `copyright`
#  file)

__authors__ = ["Valentin Valls"]
__license__ = "MIT"
__date__ = "31/07/2017"

import os
from numpy.distutils.misc_util import Configuration


def configuration(parent_package='', top_path=None):
    # Build the numpy.distutils configuration of the `third_party`
    # sub-package.
    config = Configuration('third_party', parent_package, top_path)
    # includes _local only if it is available (the `_local` directory ships
    # bundled copies of third-party modules and may be absent)
    local_path = os.path.join(top_path, parent_package, "third_party", "_local")
    if os.path.exists(local_path):
        config.add_subpackage('_local')
    return config


if __name__ == "__main__":
    from numpy.distutils.core import setup
    setup(configuration=configuration)
# ---------------------------------------------------------------------------
# fabio/third_party/ordereddict.py  -- coding: utf-8; MIT license,
# ESRF 2015-2016
#
# (tail of the MIT license header; full text in the repository `copyright`
#  file)
"""Wrapper module for the `ordereddict` library.

Feed this module using a local copy of `ordereddict` if it exists. Else it
expect to have an available `ordereddict` library installed in the Python
path.

It should be used like that:

.. code-block::

    from fabio.third_party.ordereddict import OrderedDict
"""

from __future__ import absolute_import

__authors__ = ["Valentin Valls"]
__license__ = "MIT"
__date__ = "28/07/2017"

try:
    # try to import our local version of ordereddict
    from ._local.ordereddict import *  # noqa
except ImportError:
    # else fall back on the standard-library implementation
    from collections import OrderedDict
# ---------------------------------------------------------------------------
# fabio/speimage.py  -- coding: utf-8; MIT license,
# Copyright (C) 2016 University Koeln, Germany
# Principal author: Clemens Prescher (c.prescher@uni-koeln.de)
#
to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. """Princeton instrument SPE image reader for FabIO """ # Get ready for python3: from __future__ import with_statement, print_function, division __authors__ = ["Clemens Prescher"] __contact__ = "c.prescher@uni-koeln.de" __license__ = "MIT" __copyright__ = "Clemens Prescher" __date__ = "24/07/2017" import logging logger = logging.getLogger(__name__) import datetime from xml.dom.minidom import parseString import numpy as np from numpy.polynomial.polynomial import polyval from .fabioimage import FabioImage class SpeImage(FabioImage): """FabIO image class for Images for Princeton/SPE detector Put some documentation here """ DATA_TYPES = {0: np.float32, 1: np.int32, 2: np.int16, 3: np.uint16} DESCRIPTION = "Princeton instrument SPE file format" DEFAULT_EXTENSIONS = ["spe"] def _readheader(self, infile): """ Read and decode the header of an image: :param infile: Opened python file (can be stringIO or bipped file) """ self.header['version'] = self._get_version(infile) self.header['data_type'] = self._read_at(infile, 108, 1, np.uint16)[0] self.header['x_dim'] = int(self._read_at(infile, 42, 1, np.int16)[0]) self.header['y_dim'] = int(self._read_at(infile, 656, 1, np.int16)[0]) self.header['num_frames'] = self._read_at(infile, 1446, 1, np.int32)[0] if self.header['version'] 
== 2: self.header['time'] = self._read_date_time_from_header(infile) self.header['x_calibration'] = self._read_calibration_from_header(infile) self.header['exposure_time'] = self._read_at(infile, 10, 1, np.float32)[0] self.header['detector'] = 'unspecified' self.header['grating'] = str(self._read_at(infile, 650, 1, np.float32)[0]) self.header['center_wavelength'] = float(self._read_at(infile, 72, 1, np.float32)[0]) # # self._read_roi_from_header() # self._read_num_frames_from_header() # self._read_num_combined_frames_from_header() elif self.header['version'] == 3: xml_string = self._get_xml_string(infile) dom = self._create_dom_from_xml(xml_string) self.header['time'] = self._read_date_time_from_dom(dom) self.header['roi'] = self._read_roi_from_dom(dom) self.header['x_calibration'] = self._read_calibration_from_dom(dom) self.header['exposure_time'] = self._read_exposure_from_dom(dom) self.header['detector'] = self._read_detector_from_dom(dom) self.header['grating'] = self._read_grating_from_dom(dom, infile) self.header['center_wavelength'] = self._read_center_wavelength_from_dom(dom, infile) self.header = self.check_header(self.header) def read(self, fname, frame=None): """ try to read image :param fname: name of the file :param frame: """ self.resetvals() with self._open(fname, 'rb') as infile: self._readheader(infile) # read the image data and declare self.data = self._read_data(infile, frame) return self def _get_version(self, infile): self.xml_offset = self._read_at(infile, 678, 1, np.long)[0] if self.xml_offset == 0: return 2 else: return 3 def _read_date_time_from_header(self, infile): """Reads the collection time from the header into the date_time field""" raw_date = self._read_at(infile, 20, 9, np.int8) raw_time = self._read_at(infile, 172, 6, np.int8) str_date = ''.join([chr(i) for i in raw_date]) str_date += ''.join([chr(i) for i in raw_time]) date_time = datetime.datetime.strptime(str_date, "%d%b%Y%H%M%S") return date_time.strftime("%m/%d/%Y %H:%M:%S") 
def _read_date_time_from_dom(self, dom): """Reads the time of collection and saves it date_time field""" date_time_str = dom.getElementsByTagName('Origin')[0].getAttribute('created') try: date_time = datetime.datetime.strptime(date_time_str[:-7], "%Y-%m-%dT%H:%M:%S.%f") return date_time.strftime("%m/%d/%Y %H:%M:%S.%f") except ValueError: date_time = datetime.datetime.strptime(date_time_str[:-6], "%Y-%m-%dT%H:%M:%S") return date_time.strftime("%m/%d/%Y %H:%M:%S") def _read_calibration_from_header(self, infile): """Reads the calibration from the header into the x_calibration field""" x_polynocoeff = self._read_at(infile, 3263, 6, np.double) x_val = np.arange(self.header['x_dim']) + 1 return np.array(polyval(x_val, x_polynocoeff)) def _read_calibration_from_dom(self, dom): """Reads the x calibration of the image from the xml footer and saves it in the x_calibration field""" spe_format = dom.childNodes[0] calibrations = spe_format.getElementsByTagName('Calibrations')[0] wavelengthmapping = calibrations.getElementsByTagName('WavelengthMapping')[0] wavelengths = wavelengthmapping.getElementsByTagName('Wavelength')[0] wavelength_values = wavelengths.childNodes[0] x_calibration = np.array([float(i) for i in wavelength_values.toxml().split(',')]) return x_calibration[self.header['roi'][0]:self.header['roi'][1]] def _read_num_frames_from_header(self, infile): self.num_frames = self._read_at(infile, 1446, 1, np.int32)[0] def _get_xml_string(self, infile): """Reads out the xml string from the file end""" if "size" in dir(infile): size = infile.size elif "measure_size" in dir(infile): size = infile.measure_size() else: raise RuntimeError("Unable to guess the actual size of the file") xml_size = size - self.xml_offset xml = self._read_at(infile, self.xml_offset, xml_size, np.byte) return ''.join([chr(i) for i in xml]) # if self.debug: # fid = open(self.filename + '.xml', 'w') # for line in self.xml_string: # fid.write(line) # fid.close() def _create_dom_from_xml(self, 
xml_string): """Creates a DOM representation of the xml footer and saves it in the dom field""" return parseString(xml_string) def _read_exposure_from_dom(self, dom): """Reads th exposure time of the experiment into the exposure_time field""" if len(dom.getElementsByTagName('Experiment')) != 1: # check if it is a real v3.0 file if len(dom.getElementsByTagName('ShutterTiming')) == 1: # check if it is a pixis detector exposure_time = dom.getElementsByTagName('ExposureTime')[0].childNodes[0] return np.float(exposure_time.toxml()) / 1000.0 else: exposure_time = dom.getElementsByTagName('ReadoutControl')[0]. \ getElementsByTagName('Time')[0].childNodes[0].nodeValue self.header['accumulations'] = dom.getElementsByTagName('Accumulations')[0].childNodes[0].nodeValue return np.float(exposure_time) * np.float(self.header['accumulations']) else: # this is searching for legacy experiment: self._exposure_time = dom.getElementsByTagName('LegacyExperiment')[0]. \ getElementsByTagName('Experiment')[0]. \ getElementsByTagName('CollectionParameters')[0]. \ getElementsByTagName('Exposure')[0].attributes["value"].value return np.float(self._exposure_time.split()[0]) def _read_detector_from_dom(self, dom): """Reads the detector information from the dom object""" self._camera = dom.getElementsByTagName('Camera') if len(self._camera) >= 1: return self._camera[0].getAttribute('model') else: return 'unspecified' def _read_grating_from_dom(self, dom, infile): """Reads the type of grating from the dom Model""" try: grating = dom.getElementsByTagName('Devices')[0]. \ getElementsByTagName('Spectrometer')[0]. \ getElementsByTagName('Grating')[0]. 
\ getElementsByTagName('Selected')[0].childNodes[0].toxml() return grating.split('[')[1].split(']')[0].replace(',', ' ') except IndexError: # try from header: return str(self._read_at(infile, 650, 1, np.float32)[0]) def _read_center_wavelength_from_dom(self, dom, infile): """Reads the center wavelength from the dom Model and saves it center_wavelength field""" try: center_wavelength = dom.getElementsByTagName('Devices')[0]. \ getElementsByTagName('Spectrometer')[0]. \ getElementsByTagName('Grating')[0]. \ getElementsByTagName('CenterWavelength')[0]. \ childNodes[0].toxml() return float(center_wavelength) except IndexError: # try from header return float(self._read_at(infile, 72, 1, np.float32)[0]) def _read_roi_from_dom(self, dom): """Reads the ROIs information defined in the SPE file. Depending on the modus it will read out: For CustomRegions roi_x, roi_y, roi_width, roi_height, roi_x_binning, roi_y_binning For FullSensor roi_x,roi_y, roi_width, roi_height""" try: roi_modus = str(dom.getElementsByTagName('ReadoutControl')[0]. getElementsByTagName('RegionsOfInterest')[0]. getElementsByTagName('Selection')[0]. childNodes[0].toxml()) if roi_modus == 'CustomRegions': roi_dom = dom.getElementsByTagName('ReadoutControl')[0]. \ getElementsByTagName('RegionsOfInterest')[0]. \ getElementsByTagName('CustomRegions')[0]. 
\ getElementsByTagName('RegionOfInterest')[0] roi_x = int(roi_dom.attributes['x'].value) roi_y = int(roi_dom.attributes['y'].value) roi_width = int(roi_dom.attributes['width'].value) roi_height = int(roi_dom.attributes['height'].value) else: roi_x = 0 roi_y = 0 roi_width = self.header['x_dim'] roi_height = self.header['y_dim'] except IndexError: roi_x = 0 roi_y = 0 roi_width = self.header['x_dim'] roi_height = self.header['y_dim'] return roi_x, roi_x + roi_width, roi_y, roi_y + roi_height def _read_at(self, infile, pos, size, ntype): infile.seek(pos) dtype = np.dtype(ntype) bp = dtype.itemsize data = infile.read(size * bp) return np.fromstring(data, dtype) def _read_data(self, infile, frame=None): if frame is None: frame = 0 dtype = self.DATA_TYPES.get(self.header['data_type']) if dtype is None: raise RuntimeError("Unsuported data type: %s" % self.header['data_type']) number_size = np.dtype(dtype).itemsize frame_size = self.header['x_dim'] * self.header['y_dim'] * number_size return self._read_frame(infile, 4100 + frame * frame_size) def _read_frame(self, infile, pos=None): """Reads in a frame at a specific binary position. The following header parameters have to be predefined before calling this function: datatype - either 0,1,2,3 for float32, int32, int16 or uint16 x_dim, y_dim - being the dimensions. 
""" if pos is None: pos = infile.tell() dtype = self.DATA_TYPES.get(self.header['data_type']) if dtype is None: return None data = self._read_at(infile, pos, self.header['x_dim'] * self.header['y_dim'], dtype) return data.reshape((self.header['y_dim'], self.header['x_dim'])) # this is not compatibility with old code: speimage = SpeImage fabio-0.6.0/fabio/pixiimage.py0000644001611600070440000001263513227357030017327 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE """ Author: Jon Wright, ESRF. 
""" # Get ready for python3: from __future__ import with_statement, print_function __authors__ = ["Jon Wright", "Jérôme Kieffer"] __contact__ = "wright@esrf.fr" __license__ = "MIT" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" __date__ = "25/07/2017" import numpy import os import logging logger = logging.getLogger(__name__) from .fabioimage import FabioImage from .fabioutils import previous_filename, next_filename class PixiImage(FabioImage): DESCRIPTION = "Pixi file format" DEFAULT_EXTENSIONS = [] _need_a_seek_to_read = True def _readheader(self, infile): infile.seek(0) self.header = self.check_header() byt = infile.read(4) framesize = numpy.fromstring(byt, numpy.int32) if framesize == 243722: # life is good width = 476 height = 512 offset = 24 self.header['framesize'] = framesize self.header['width'] = width self.header['height'] = height self.header['offset'] = offset else: logger.warning("Pixiimage, bad framesize: %s", framesize) raise def read(self, fname, frame=None): if frame is None: frame = 0 self.header = self.check_header() self.resetvals() with self._open(fname, "rb") as infile: self.sequencefilename = fname self._readheader(infile) self.nframes = os.path.getsize(fname) / 487448 self._readframe(infile, frame) # infile.close() return self def _makeframename(self): self.filename = "%s$%04d" % (self.sequencefilename, self.currentframe) def _readframe(self, filepointer, img_num): if (img_num > self.nframes or img_num < 0): raise Exception("Bad image number") imgstart = self.header['offset'] + img_num * (512 * 476 * 2 + 24) filepointer.seek(imgstart, 0) self.data = numpy.fromstring(filepointer.read(512 * 476 * 2), numpy.uint16) self.data.shape = self.header['height'], self.header['width'] self.dim2, self.dim1 = self.data.shape self.currentframe = int(img_num) self._makeframename() def getframe(self, num): """ Returns a frame as a new FabioImage object """ if num < 0 or num > self.nframes: raise Exception("Requested frame number 
is out of range") # Do a deep copy of the header to make a new one newheader = {} for k in self.header.keys(): newheader[k] = self.header[k] frame = pixiimage(header=newheader) frame.nframes = self.nframes frame.sequencefilename = self.sequencefilename infile = frame._open(self.sequencefilename, "rb") frame._readframe(infile, num) infile.close() return frame def next(self): """ Get the next image in a series as a fabio image """ if self.currentframe < (self.nframes - 1) and self.nframes > 1: return self.getframe(self.currentframe + 1) else: newobj = pixiimage() newobj.read(next_filename( self.sequencefilename)) return newobj def previous(self): """ Get the previous image in a series as a fabio image """ if self.currentframe > 0: return self.getframe(self.currentframe - 1) else: newobj = pixiimage() newobj.read(previous_filename( self.sequencefilename)) return newobj pixiimage = PixiImage def demo(fname): i = PixiImage() i.read(fname) import pylab pylab.imshow(numpy.log(i.data)) print("%s\t%s\t%s\t%s" % (i.filename, i.data.max(), i.data.min(), i.data.mean())) pylab.title(i.filename) pylab.show() while 1: i = i.next() pylab.imshow(numpy.log(i.data)) pylab.title(i.filename) pylab.show() raw_input() if __name__ == "__main__": import sys demo(sys.argv[1]) fabio-0.6.0/fabio/tifimage.py0000644001611600070440000002663313227357030017143 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: FabIO X-ray image reader # # Copyright (C) 2010-2016 European Synchrotron Radiation Facility # Grenoble, France # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright 
notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # """ FabIO class for dealing with TIFF images. In facts wraps TiffIO from V. Armando Solé (available in PyMca) or falls back to PIL Authors: ........ * Henning O. Sorensen & Erik Knudsen: Center for Fundamental Research: Metal Structures in Four Dimensions; Risoe National Laboratory; Frederiksborgvej 399; DK-4000 Roskilde; email:erik.knudsen@risoe.dk * Jérôme Kieffer: European Synchrotron Radiation Facility; Grenoble (France) License: MIT """ # Get ready for python3: from __future__ import with_statement, print_function, division __authors__ = ["Jérôme Kieffer", "Henning O. 
Sorensen", "Erik Knudsen"] __date__ = "11/08/2017" __license__ = "MIT" __copyright__ = "ESRF, Grenoble & Risoe National Laboratory" __status__ = "stable" import time import logging import struct logger = logging.getLogger(__name__) try: from PIL import Image except ImportError: Image = None import numpy from .utils import pilutils from .fabioimage import FabioImage from .TiffIO import TiffIO LITTLE_ENDIAN = 1234 BIG_ENDIAN = 3412 TYPES = { 0: 'invalid', 1: 'byte', 2: 'ascii', 3: 'short', 4: 'long', 5: 'rational', 6: 'sbyte', 7: 'undefined', 8: 'sshort', 9: 'slong', 10: 'srational', 11: 'float', 12: 'double' } TYPESIZES = { 0: 0, 1: 1, 2: 1, 3: 2, 4: 4, 5: 8, 6: 1, 7: 1, 8: 2, 9: 4, 10: 8, 11: 4, 12: 8 } baseline_tiff_tags = { 256: 'ImageWidth', 257: 'ImageLength', 306: 'DateTime', 315: 'Artist', 258: 'BitsPerSample', 265: 'CellLength', 264: 'CellWidth', 259: 'Compression', 262: 'PhotometricInterpretation', 296: 'ResolutionUnit', 282: 'XResolution', 283: 'YResolution', 278: 'RowsPerStrip', 273: 'StripOffset', 279: 'StripByteCounts', 270: 'ImageDescription', 271: 'Make', 272: 'Model', 320: 'ColorMap', 305: 'Software', 339: 'SampleFormat', 33432: 'Copyright' } class TifImage(FabioImage): """ Images in TIF format Wraps TiffIO """ DESCRIPTION = "Tagged image file format" DEFAULT_EXTENSIONS = ["tif", "tiff"] _need_a_seek_to_read = True def __init__(self, *args, **kwds): """ Tifimage constructor adds an nbits member attribute """ self.nbits = None FabioImage.__init__(self, *args, **kwds) self.lib = None def _readheader(self, infile): """ Try to read Tiff images header... 
""" # try: # self.header = { "filename" : infile.name } # except AttributeError: # self.header = {} # t = Tiff_header(infile.read()) # self.header = t.header # try: # self.dim1 = int(self.header['ImageWidth']) # self.dim2 = int(self.header['ImageLength']) # except (KeyError): # logger.warning("image dimensions could not be determined from header tags, trying to go on anyway") # read the first 32 bytes to determine size header = numpy.fromstring(infile.read(64), numpy.uint16) self.dim1 = int(header[9]) self.dim2 = int(header[15]) # nbits is not a FabioImage attribute... self.nbits = int(header[21]) # number of bits def read(self, fname, frame=None): """ Wrapper for TiffIO. """ infile = self._open(fname, "rb") self._readheader(infile) infile.seek(0) self.lib = None try: tiffIO = TiffIO(infile) if tiffIO.getNumberOfImages() > 0: # No support for now of multi-frame tiff images self.data = tiffIO.getImage(0) self.header = tiffIO.getInfo(0) except Exception as error: logger.warning("Unable to read %s with TiffIO due to %s, trying PIL" % (fname, error)) else: if self.data.ndim == 2: self.dim2, self.dim1 = self.data.shape elif self.data.ndim == 3: self.dim2, self.dim1, _ = self.data.shape logger.warning("Third dimension is the color") else: logger.warning("dataset has %s dimensions (%s), check for errors !!!!", self.data.ndim, self.data.shape) self.lib = "TiffIO" if (self.lib is None): if Image: try: infile.seek(0) self.pilimage = Image.open(infile) except Exception: logger.error("Error in opening %s with PIL" % fname) self.lib = None infile.seek(0) else: self.lib = "PIL" self.data = pilutils.get_numpy_array(self.pilimage) else: logger.error("Error in opening %s: no tiff reader managed to read the file.", fname) self.lib = None infile.seek(0) self.resetvals() return self def write(self, fname): """ Overrides the FabioImage.write method and provides a simple TIFF image writer. 
class Image_File_Directory_entry(object):
    """One 12-byte entry (tag) of a TIFF Image File Directory.

    Entry layout: tag (2 bytes), type (2 bytes), count (4 bytes),
    value-or-offset (4 bytes).  Fields are decoded as little-endian
    ("II" files), which is the byte order this fallback parser assumes
    throughout -- big-endian ("MM") files are not handled here.
    """

    def __init__(self, tag=0, tag_type=0, count=0, offset=0):
        self.tag = tag              # TIFF tag id
        self.tag_type = tag_type    # index into TYPES / TYPESIZES
        self.count = count          # number of values of that type
        self.val_offset = offset    # offset of the value block
        self.val = None             # decoded value, filled by extract_data()

    def unpack(self, strInput):
        """Decode one 12-byte IFD entry located at the start of strInput.

        :param strInput: byte string starting at the entry
        :return: self, or None when the entry is invalid (count <= 0)
        """
        idfentry = strInput[:12]
        # "<HHL" uses standard struct sizes: 2 + 2 + 4 = 8 bytes.  This
        # answers the old TOFIX here: the former native-size "HHL" is
        # padded/widened to 16 bytes on LP64 platforms, so `count` was
        # read from the wrong offset.
        (tag, tag_type, count) = struct.unpack_from("<HHL", idfentry)
        self.tag = tag
        self.count = count
        self.tag_type = tag_type
        self.val = None
        if count <= 0:
            logger.warning("Tag # %s has an invalid count: %s. Tag is ignored" % (tag, count))
            return
        if count * TYPESIZES[tag_type] <= 4:
            # the value fits into the entry itself (bytes 8-11)
            self.val_offset = 8
            self.extract_data(idfentry)
            self.val_offset = None
        else:
            # otherwise bytes 8-11 hold the offset of the value block
            self.val_offset = struct.unpack_from("<L", idfentry[8:])[0]
        return self

    def extract_data(self, full_string):
        """Decode self.val from full_string at self.val_offset, according to self.tag_type."""
        tag_type = self.tag_type
        if TYPES[tag_type] == 'byte':
            self.val = struct.unpack_from("B", full_string[self.val_offset:])[0]
        elif TYPES[tag_type] == 'short':
            self.val = struct.unpack_from("<H", full_string[self.val_offset:])[0]
        elif TYPES[tag_type] == 'long':
            self.val = struct.unpack_from("<L", full_string[self.val_offset:])[0]
        elif TYPES[tag_type] == 'sbyte':
            self.val = struct.unpack_from("b", full_string[self.val_offset:])[0]
        elif TYPES[tag_type] == 'sshort':
            self.val = struct.unpack_from("<h", full_string[self.val_offset:])[0]
        elif TYPES[tag_type] == 'slong':
            self.val = struct.unpack_from("<l", full_string[self.val_offset:])[0]
        elif TYPES[tag_type] == 'ascii':
            self.val = full_string[self.val_offset:self.val_offset + max(self.count, 4)]
        elif TYPES[tag_type] == 'rational':
            if self.val_offset is not None:
                (num, den) = struct.unpack_from("<LL", full_string[self.val_offset:])
                self.val = float(num) / den
        elif TYPES[tag_type] == 'srational':
            if self.val_offset is not None:
                (num, den) = struct.unpack_from("<ll", full_string[self.val_offset:])
                # a stray trailing comma used to turn this value into a
                # 1-tuple instead of a float
                self.val = float(num) / den
        elif TYPES[tag_type] == 'float':
            self.val = struct.unpack_from("<f", full_string[self.val_offset:])[0]
        elif TYPES[tag_type] == 'double':
            if self.val_offset is not None:
                self.val = struct.unpack_from("<d", full_string[self.val_offset:])[0]
        else:
            logger.warning("unrecognized type of strInputentry self: %s tag: %s type: %s TYPE: %s" % (self, baseline_tiff_tags[self.tag], self.tag_type, TYPES[tag_type]))
Sorensen & Erik Knudsen: Center for Fundamental Research: Metal Structures in Four Dimensions; Risoe National Laboratory; Frederiksborgvej 399; DK-4000 Roskilde; email:erik.knudsen@risoe.dk * Jon Wright & Jérôme Kieffer: European Synchrotron Radiation Facility; Grenoble (France) """ # get ready for python3 from __future__ import with_statement, print_function, absolute_import, division import os import re import string import logging logger = logging.getLogger(__name__) import numpy from .fabioimage import FabioImage, OrderedDict from .fabioutils import isAscii, toAscii, nice_int from .compression import decBzip2, decGzip, decZlib from . import compression as compression_module from . import fabioutils BLOCKSIZE = 512 DATA_TYPES = {"SignedByte": numpy.int8, "Signed8": numpy.int8, "UnsignedByte": numpy.uint8, "Unsigned8": numpy.uint8, "SignedShort": numpy.int16, "Signed16": numpy.int16, "UnsignedShort": numpy.uint16, "Unsigned16": numpy.uint16, "UnsignedShortInteger": numpy.uint16, "SignedInteger": numpy.int32, "Signed32": numpy.int32, "UnsignedInteger": numpy.uint32, "Unsigned32": numpy.uint32, "SignedLong": numpy.int32, "UnsignedLong": numpy.uint32, "Signed64": numpy.int64, "Unsigned64": numpy.uint64, "FloatValue": numpy.float32, "FLOATVALUE": numpy.float32, "FLOAT": numpy.float32, # fit2d "Float": numpy.float32, # fit2d "FloatIEEE32": numpy.float32, "Float32": numpy.float32, "Double": numpy.float64, "DoubleValue": numpy.float64, "FloatIEEE64": numpy.float64, "DoubleIEEE64": numpy.float64} try: DATA_TYPES["FloatIEEE128"] = DATA_TYPES["DoubleIEEE128"] = DATA_TYPES["QuadrupleValue"] = numpy.float128 except AttributeError: # not in your numpy logger.debug("No support for float128 in your code") NUMPY_EDF_DTYPE = {"int8": "SignedByte", "int16": "SignedShort", "int32": "SignedInteger", "int64": "Signed64", "uint8": "UnsignedByte", "uint16": "UnsignedShort", "uint32": "UnsignedInteger", "uint64": "Unsigned64", "float32": "FloatValue", "float64": "DoubleValue", 
def __init__(self, data=None, header=None, number=None):
    """Build a single EDF frame.

    :param data: numpy array holding the frame data, or None
    :param header: mapping of header keys/values, or None
    :param number: frame index within the file, or None (defaults to 0)
    """
    self.header = EdfImage.check_header(header)
    # UPPERCASE-keyed view of the header, for case-insensitive lookups
    self.capsHeader = dict((key.upper(), key) for key in self.header)
    self._data = data
    self.dims = []
    self.dim1 = 0
    self.dim2 = 0
    self.start = None  # Position of start of raw data in file
    self.size = None   # size of raw data in file
    self.file = None   # opened file object with locking capabilities !!!
    self.bpp = None
    self.incomplete_data = False
    self._bytecode = None
    self.iFrame = 0 if number is None else int(number)
whitespace = string.whitespace + "\x00" for line in block.split(';'): if '=' in line: key, val = line.split('=', 1) key = key.strip(whitespace) self.header[key] = val.strip(whitespace) self.capsHeader[key.upper()] = key # Compute image size if "SIZE" in self.capsHeader: try: self.size = nice_int(self.header[self.capsHeader["SIZE"]]) except ValueError: logger.warning("Unable to convert to integer : %s %s " % (self.capsHeader["SIZE"], self.header[self.capsHeader["SIZE"]])) if "DIM_1" in self.capsHeader: try: dim1 = nice_int(self.header[self.capsHeader['DIM_1']]) except ValueError: logger.error("Unable to convert to integer Dim_1: %s %s" % (self.capsHeader["DIM_1"], self.header[self.capsHeader["DIM_1"]])) else: calcsize *= dim1 self.dims.append(dim1) else: logger.error("No Dim_1 in headers !!!") if "DIM_2" in self.capsHeader: try: dim2 = nice_int(self.header[self.capsHeader['DIM_2']]) except ValueError: logger.error("Unable to convert to integer Dim_2: %s %s" % (self.capsHeader["DIM_2"], self.header[self.capsHeader["DIM_2"]])) else: calcsize *= dim2 self.dims.append(dim2) else: logger.error("No Dim_2 in headers !!!") iDim = 3 # JON: this appears to be for nD images, but we don't treat those while iDim is not None: strDim = "DIM_%i" % iDim if strDim in self.capsHeader: try: dim3 = nice_int(self.header[self.capsHeader[strDim]]) except ValueError: logger.error("Unable to convert to integer %s: %s %s", strDim, self.capsHeader[strDim], self.header[self.capsHeader[strDim]]) dim3 = None iDim = None else: if dim3 > 1: # Otherwise treat dim3==1 as a 2D image calcsize *= dim3 self.dims.append(dim3) iDim += 1 else: logger.debug("No Dim_3 -> it is a 2D image") iDim = None if self._bytecode is None: if "DATATYPE" in self.capsHeader: self._bytecode = DATA_TYPES[self.header[self.capsHeader['DATATYPE']]] else: self._bytecode = numpy.uint16 logger.warning("Defaulting type to uint16") self.bpp = len(numpy.array(0, self._bytecode).tostring()) calcsize *= self.bpp if (self.size is None): 
def swap_needed(self):
    """Decide whether the frame's binary blob needs a byte swap.

    :return: False when the file matches the machine byte order, True when
        it does not and the element size is a multi-byte one, None when the
        BYTEORDER header value is not understood
    """
    byte_order = self.header[self.capsHeader['BYTEORDER']]
    file_low = 'Low' in byte_order
    file_high = 'High' in byte_order
    machine_low = numpy.little_endian
    if (file_low and machine_low) or (file_high and not machine_low):
        # file endianness matches the machine: nothing to do
        return False
    if (file_high and machine_low) or (file_low and not machine_low):
        # mismatch: only multi-byte element sizes can be swapped
        return self.bpp in (2, 4, 8)
    # unrecognised BYTEORDER value: fall through (implicit None, as before)
% (self.file, self.file.filename)) return else: self.file.seek(self.start) try: fileData = self.file.read(self.size) except Exception as e: if isinstance(self.file, fabioutils.GzipFile): if compression_module.is_incomplete_gz_block_exception(e): return numpy.zeros(dims) raise e if ("COMPRESSION" in self.capsHeader): compression = self.header[self.capsHeader["COMPRESSION"]].upper() uncompressed_size = self.bpp for i in dims: uncompressed_size *= i if "OFFSET" in compression: try: import byte_offset # IGNORE:F0401 except ImportError as error: logger.error("Unimplemented compression scheme: %s (%s)" % (compression, error)) else: myData = byte_offset.analyseCython(fileData, size=uncompressed_size) rawData = myData.astype(self._bytecode).tostring() self.size = uncompressed_size elif compression == "NONE": rawData = fileData elif "GZIP" in compression: rawData = decGzip(fileData) self.size = uncompressed_size elif "BZ" in compression: rawData = decBzip2(fileData) self.size = uncompressed_size elif "Z" in compression: rawData = decZlib(fileData) self.size = uncompressed_size else: logger.warning("Unknown compression scheme %s" % compression) rawData = fileData else: rawData = fileData expected = self.size obtained = len(rawData) if expected > obtained: logger.error("Data stream is incomplete: %s < expected %s bytes" % (obtained, expected)) rawData += "\x00".encode("ascii") * (expected - obtained) elif expected < len(rawData): logger.info("Data stream contains trailing junk : %s > expected %s bytes" % (obtained, expected)) rawData = rawData[:expected] data = numpy.fromstring(rawData, self._bytecode).reshape(tuple(dims)) if self.swap_needed(): data.byteswap(True) self._data = data self._bytecode = data.dtype.type return data def setData(self, npa=None): """Setter for data in edf frame""" self._data = npa data = property(getData, setData, "property: (edf)frame.data, uncompress the datablock when needed") def getByteCode(self): if self._bytecode is None: self._bytecode = 
self.data.dtype.type return self._bytecode def setByteCode(self, _iVal): self._bytecode = _iVal bytecode = property(getByteCode, setByteCode) def getEdfBlock(self, force_type=None, fit2dMode=False): """ :param force_type: type of the dataset to be enforced like "float64" or "uint16" :type force_type: string or numpy.dtype :param boolean fit2dMode: enforce compatibility with fit2d and starts counting number of images at 1 :return: ascii header block + binary data block :rtype: python bytes with the concatenation of the ascii header and the binary data block """ if force_type is not None: data = self.data.astype(force_type) else: data = self.data fit2dMode = bool(fit2dMode) for key in self.header: KEY = key.upper() if KEY not in self.capsHeader: self.capsHeader[KEY] = key header = self.header.copy() header_keys = list(self.header.keys()) capsHeader = self.capsHeader.copy() listHeader = ["{\n"] # First of all clean up the headers: for i in capsHeader: if "DIM_" in i: header.pop(capsHeader[i]) header_keys.remove(capsHeader[i]) for KEY in ["SIZE", "EDF_BINARYSIZE", "EDF_HEADERSIZE", "BYTEORDER", "DATATYPE", "HEADERID", "IMAGE"]: if KEY in capsHeader: header.pop(capsHeader[KEY]) header_keys.remove(capsHeader[KEY]) if "EDF_DATABLOCKID" in capsHeader: header_keys.remove(capsHeader["EDF_DATABLOCKID"]) # but do not remove the value from dict, instead reset the key ... 
if capsHeader["EDF_DATABLOCKID"] != "EDF_DataBlockID": header["EDF_DataBlockID"] = header.pop(capsHeader["EDF_DATABLOCKID"]) capsHeader["EDF_DATABLOCKID"] = "EDF_DataBlockID" # Then update static headers freshly deleted header_keys.insert(0, "Size") header["Size"] = len(data.tostring()) header_keys.insert(0, "HeaderID") header["HeaderID"] = "EH:%06d:000000:000000" % (self.iFrame + fit2dMode) header_keys.insert(0, "Image") header["Image"] = str(self.iFrame + fit2dMode) dims = list(data.shape) nbdim = len(dims) for i in dims: key = "Dim_%i" % nbdim header[key] = i header_keys.insert(0, key) nbdim -= 1 header_keys.insert(0, "DataType") header["DataType"] = NUMPY_EDF_DTYPE[str(numpy.dtype(data.dtype))] header_keys.insert(0, "ByteOrder") if numpy.little_endian: header["ByteOrder"] = "LowByteFirst" else: header["ByteOrder"] = "HighByteFirst" approxHeaderSize = 100 for key in header: approxHeaderSize += 7 + len(key) + len(str(header[key])) approxHeaderSize = BLOCKSIZE * (approxHeaderSize // BLOCKSIZE + 1) header_keys.insert(0, "EDF_HeaderSize") header["EDF_HeaderSize"] = "%5s" % (approxHeaderSize) header_keys.insert(0, "EDF_BinarySize") header["EDF_BinarySize"] = data.nbytes header_keys.insert(0, "EDF_DataBlockID") if "EDF_DataBlockID" not in header: header["EDF_DataBlockID"] = "%i.Image.Psd" % (self.iFrame + fit2dMode) preciseSize = 4 # 2 before {\n 2 after }\n for key in header_keys: # Escape keys or values that are no ascii strKey = str(key) if not isAscii(strKey, listExcluded=["}", "{"]): logger.warning("Non ascii key %s, skipping" % strKey) continue strValue = str(header[key]) if not isAscii(strValue, listExcluded=["}", "{"]): logger.warning("Non ascii value %s, skipping" % strValue) continue line = strKey + " = " + strValue + " ;\n" preciseSize += len(line) listHeader.append(line) if preciseSize > approxHeaderSize: logger.error("I expected the header block only at %s in fact it is %s" % (approxHeaderSize, preciseSize)) for idx, line in enumerate(listHeader[:]): if 
line.startswith("EDF_HeaderSize"): headerSize = BLOCKSIZE * (preciseSize // BLOCKSIZE + 1) newline = "EDF_HeaderSize = %5s ;\n" % headerSize delta = len(newline) - len(line) if (preciseSize // BLOCKSIZE) != ((preciseSize + delta) // BLOCKSIZE): headerSize = BLOCKSIZE * ((preciseSize + delta) // BLOCKSIZE + 1) newline = "EDF_HeaderSize = %5s ;\n" % headerSize preciseSize = preciseSize + delta listHeader[idx] = newline break else: headerSize = approxHeaderSize listHeader.append(" " * (headerSize - preciseSize) + "}\n") return ("".join(listHeader)).encode("ASCII") + data.tostring() class EdfImage(FabioImage): """ Read and try to write the ESRF edf data format """ DESCRIPTION = "European Synchrotron Radiation Facility data format" DEFAULT_EXTENSIONS = ["edf", "cor"] RESERVED_HEADER_KEYS = ['HEADERID', 'IMAGE', 'BYTEORDER', 'DATATYPE', 'DIM_1', 'DIM_2', 'DIM_3', 'SIZE'] def __init__(self, data=None, header=None, frames=None): self.currentframe = 0 self.filesize = None self._incomplete_file = False if data is None: # In case of creation of an empty instance stored_data = None else: try: dim = len(data.shape) except Exception as error: # IGNORE:W0703 logger.debug("Data don't look like a numpy array (%s), resetting all!!" % error) dim = 0 if dim == 0: raise Exception("Data with empty shape is unsupported") elif dim == 1: logger.warning("Data in 1d dimension will be stored as a 2d dimension array") # make sure we do not change the shape of the input data stored_data = numpy.array(data, copy=False) stored_data.shape = (1, len(data)) elif dim == 2: stored_data = data elif dim >= 3: raise Exception("Data dimension too big. 
Only 1d or 2d arrays are supported.") FabioImage.__init__(self, stored_data, header) if frames is None: frame = Frame(data=self.data, header=self.header, number=self.currentframe) self._frames = [frame] else: self._frames = frames @staticmethod def check_header(header=None): """ Empty for FabioImage but may be populated by others classes """ if not isinstance(header, dict): return OrderedDict() new = OrderedDict() for key, value in header.items(): new[toAscii(key, ";{}")] = toAscii(value, ";{}") return new @staticmethod def _readHeaderBlock(infile, frame_id): """ Read in a header in some EDF format from an already open file :param fileid infile: file object open in read mode :param int frame_id: Informative frame ID :return: string (or None if no header was found. :raises MalformedHeaderError: If the header can't be read """ MAX_HEADER_SIZE = BLOCKSIZE * 20 try: block = infile.read(BLOCKSIZE) except Exception as e: if isinstance(infile, fabioutils.GzipFile): if compression_module.is_incomplete_gz_block_exception(e): raise MalformedHeaderError("Incomplete GZ block for header frame %i", frame_id) raise e if len(block) == 0: # end of file return None begin_block = block.find(b"{") if begin_block < 0: if len(block) < BLOCKSIZE and len(block.strip()) == 0: # Empty block looks to be a valid end of file return None logger.debug("Malformed header: %s", block) raise MalformedHeaderError("Header frame %i do not contains '{'" % frame_id) start = block[0:begin_block] if start.strip() != b"": logger.debug("Malformed header: %s", start) raise MalformedHeaderError("Header frame %i contains non-whitespace before '{'" % frame_id) if len(block) < BLOCKSIZE: logger.warning("Under-short header frame %i: only %i bytes", frame_id, len(block)) start = block.find(b"EDF_HeaderSize", begin_block) if start >= 0: equal = block.index(b"=", start + len(b"EDF_HeaderSize")) end = block.index(b";", equal + 1) try: chunk = block[equal + 1:end].strip() new_max_header_size = int(chunk) except 
Exception: logger.warning("Unable to read header size, got: %s", chunk) else: if new_max_header_size > MAX_HEADER_SIZE: logger.info("Redefining MAX_HEADER_SIZE to %s", new_max_header_size) MAX_HEADER_SIZE = new_max_header_size block_size = len(block) blocks = [block] end_pattern = re.compile(b"}[\r\n]") while True: end = end_pattern.search(block) if end is not None: end_block = block_size - len(block) + end.start() break block = infile.read(BLOCKSIZE) block_size += len(block) blocks.append(block) if len(block) == 0 or block_size > MAX_HEADER_SIZE: block = b"".join(blocks) logger.debug("Runaway header in EDF file MAX_HEADER_SIZE: %s\n%s", MAX_HEADER_SIZE, block) raise MalformedHeaderError("Runaway header frame %i (max size: %i)" % (frame_id, MAX_HEADER_SIZE)) block = b"".join(blocks) # Now it is essential to go to the start of the binary part if block[end_block: end_block + 3] == b"}\r\n": offset = end_block + 3 - len(block) elif block[end_block: end_block + 2] == b"}\n": offset = end_block + 2 - len(block) else: logger.warning("Malformed end of header block") offset = end_block + 2 - len(block) infile.seek(offset, os.SEEK_CUR) return block[begin_block:end_block].decode("ASCII") @property def incomplete_file(self): """Returns true if the file is not complete. 
:rtype: bool """ return self._incomplete_file def _readheader(self, infile): """ Read all headers in a file and populate self.header data is not yet populated :type infile: file object open in read mode """ self._frames = [] while True: try: block = self._readHeaderBlock(infile, len(self._frames)) except MalformedHeaderError as e: logger.debug("Backtrace", exc_info=True) if len(self._frames) == 0: raise IOError("Invalid first header") self._incomplete_file = True break if block is None: # end of file if len(self._frames) == 0: raise IOError("Empty file") break frame = Frame(number=self.nframes) size = frame.parseheader(block) frame.file = infile frame.start = infile.tell() frame.size = size self._frames += [frame] try: # skip the data block infile.seek(size - 1, os.SEEK_CUR) data = infile.read(1) if len(data) == 0: self._incomplete_file = True frame.incomplete_data = True # Out of the file break except Exception as error: if isinstance(infile, fabioutils.GzipFile): if compression_module.is_incomplete_gz_block_exception(error): self._incomplete_file = True frame.incomplete_data = True break logger.warning("infile is %s" % infile) logger.warning("Position is %s" % infile.tell()) logger.warning("size is %s" % size) logger.error("It seams this error occurs under windows when reading a (large-) file over network: %s ", error) raise Exception(error) for i, frame in enumerate(self._frames): missing = [] for item in MINIMUM_KEYS: if item not in frame.capsHeader: missing.append(item) if len(missing) > 0: logger.info("EDF file %s frame %i misses mandatory keys: %s ", self.filename, i, " ".join(missing)) self.currentframe = 0 def read(self, fname, frame=None): """ Read in header into self.header and the data into self.data """ self.resetvals() self.filename = fname infile = self._open(fname, "rb") try: self._readheader(infile) if frame is None: pass elif frame < self.nframes: self = self.getframe(frame) else: logger.error("Reading file %s You requested frame %s but only %s 
def swap_needed(self):
    """
    Decide if we need to byteswap

    :return: True if needed, False else and None if not understood
    """
    if self.bpp == 1:
        # single-byte data never needs swapping
        return False
    byte_order = self.header[self.capsHeader['BYTEORDER']]
    file_low = 'Low' in byte_order
    file_high = 'High' in byte_order
    machine_low = numpy.little_endian
    if (file_low and machine_low) or (file_high and not machine_low):
        return False
    if (file_high and machine_low) or (file_low and not machine_low):
        return True
    # unrecognised BYTEORDER value: implicit None, as documented
def deleteFrame(self, frameNb=None):
    """
    Remove one frame from the EDF image (the last one by default).

    :param int frameNb: index of the frame to remove; defaults to the last
    """
    index = -1 if frameNb is None else frameNb
    self._frames.pop(index)
:return: data from another file using positions from current EdfImage """ if (filename is None) or not os.path.isfile(filename): raise RuntimeError("EdfImage.fastReadData is only valid with another file: %s does not exist" % (filename)) data = None frame = self._frames[self.currentframe] with open(filename, "rb")as f: f.seek(frame.start) raw = f.read(frame.size) try: data = numpy.fromstring(raw, dtype=self.bytecode) data.shape = self.data.shape except Exception as error: logger.error("unable to convert file content to numpy array: %s", error) if self.swap_needed(): data.byteswap(True) return data def fastReadROI(self, filename, coords=None): """ Method reading Region of Interest of another file based on metadata available in current EdfImage. The aim is performances, ... but only supports uncompressed files. :return: ROI-data from another file using positions from current EdfImage :rtype: numpy 2darray """ if (filename is None) or not os.path.isfile(filename): raise RuntimeError("EdfImage.fastReadData is only valid with another file: %s does not exist" % (filename)) data = None frame = self._frames[self.currentframe] if len(coords) == 4: slice1 = self.make_slice(coords) elif (len(coords) == 2 and isinstance(coords[0], slice) and isinstance(coords[1], slice)): slice1 = coords else: logger.warning('readROI: Unable to understand Region Of Interest: got %s', coords) return d1 = self.data.shape[-1] start0 = slice1[0].start start1 = slice1[1].start slice2 = (slice(0, slice1[0].stop - start0, slice1[0].step), slice(0, slice1[1].stop - start1, slice1[1].step)) start = frame.start + self.bpp * (d1 * start0 + start1) size = self.bpp * ((slice2[0].stop) * d1) with open(filename, "rb")as f: f.seek(start) raw = f.read(size) try: data = numpy.fromstring(raw, dtype=self.bytecode) data.shape = -1, d1 except Exception as error: logger.error("unable to convert file content to numpy array: %s", error) if self.swap_needed(): data.byteswap(True) return data[slice2] 
def setHeader(self, _dictHeader):
    """
    Enforces the propagation of the header to the list of frames

    :param _dictHeader: header mapping to assign to the current frame
    """
    try:
        self._frames[self.currentframe].header = _dictHeader
    except AttributeError:
        # no frame list yet: create it with a single frame
        self._frames = [Frame(header=_dictHeader)]
    except IndexError:
        # currentframe points just past the end of the list: create the
        # missing frame.  The former guard `< len(self._frames)` could
        # never be true after an IndexError (non-negative index), so the
        # header was silently dropped.
        if self.currentframe == len(self._frames):
            self._frames.append(Frame(header=_dictHeader))
property(getHeaderKeys, setHeaderKeys, delHeaderKeys, "property: header_keys of EDF file") def getData(self): """ getter for edf Data :return: data for current frame :rtype: numpy.ndarray """ npaData = None try: npaData = self._frames[self.currentframe].data except AttributeError: self._frames = [Frame()] npaData = self._frames[self.currentframe].data except IndexError: if self.currentframe < len(self._frames): self._frames.append(Frame()) npaData = self._frames[self.currentframe].data return npaData def setData(self, _data): """ Enforces the propagation of the data to the list of frames :param _data: numpy array representing data """ try: self._frames[self.currentframe].data = _data except AttributeError: self._frames = [Frame(data=_data)] except IndexError: if self.currentframe < len(self._frames): self._frames.append(Frame(data=_data)) def delData(self): """ deleter for edf Data """ self._frames[self.currentframe].data = None data = property(getData, setData, delData, "property: data of EDF file") def getCapsHeader(self): """ getter for edf headers keys in upper case :return: data for current frame :rtype: dict """ return self._frames[self.currentframe].capsHeader def setCapsHeader(self, _data): """ Enforces the propagation of the header_keys to the list of frames :param _data: numpy array representing data """ self._frames[self.currentframe].capsHeader = _data def delCapsHeader(self): """ deleter for edf capsHeader """ self._frames[self.currentframe].capsHeader = {} capsHeader = property(getCapsHeader, setCapsHeader, delCapsHeader, "property: capsHeader of EDF file, i.e. 
the keys of the header in UPPER case.") def getDim1(self): return self._frames[self.currentframe].dim1 def setDim1(self, _iVal): try: self._frames[self.currentframe].dim1 = _iVal except AttributeError: self._frames = [Frame()] except IndexError: if self.currentframe < len(self._frames): self._frames.append(Frame()) self._frames[self.currentframe].dim1 = _iVal dim1 = property(getDim1, setDim1) def getDim2(self): return self._frames[self.currentframe].dim2 def setDim2(self, _iVal): try: self._frames[self.currentframe].dim2 = _iVal except AttributeError: self._frames = [Frame()] except IndexError: if self.currentframe < len(self._frames): self._frames.append(Frame()) self._frames[self.currentframe].dim2 = _iVal dim2 = property(getDim2, setDim2) def getDims(self): return self._frames[self.currentframe].dims dims = property(getDims) def getByteCode(self): return self._frames[self.currentframe].bytecode def setByteCode(self, _iVal): try: self._frames[self.currentframe].bytecode = _iVal except AttributeError: self._frames = [Frame()] except IndexError: if self.currentframe < len(self._frames): self._frames.append(Frame()) self._frames[self.currentframe].bytecode = _iVal bytecode = property(getByteCode, setByteCode) def getBpp(self): return self._frames[self.currentframe].bpp def setBpp(self, _iVal): try: self._frames[self.currentframe].bpp = _iVal except AttributeError: self._frames = [Frame()] except IndexError: if self.currentframe < len(self._frames): self._frames.append(Frame()) self._frames[self.currentframe].bpp = _iVal bpp = property(getBpp, setBpp) def isIncompleteData(self): return self._frames[self.currentframe].incomplete_data incomplete_data = property(isIncompleteData) edfimage = EdfImage fabio-0.6.0/fabio/kcdimage.py0000644001611600070440000001472413227357030017120 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France 
# # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE """ Authors: Jerome Kieffer, ESRF email:jerome.kieffer@esrf.fr kcd images are 2D images written by the old KappaCCD diffractometer built by Nonius in the 1990's Based on the edfimage.py parser. """ # Get ready for python3: from __future__ import with_statement, print_function import numpy import logging import os import string from .fabioimage import FabioImage from .fabioutils import six logger = logging.getLogger(__name__) import io if not hasattr(io, "SEEK_END"): SEEK_END = 2 else: SEEK_END = io.SEEK_END DATA_TYPES = {"u16": numpy.uint16} MINIMUM_KEYS = [ # 'ByteOrder', Assume little by default 'Data type', 'X dimension', 'Y dimension', 'Number of readouts'] DEFAULT_VALUES = {"Data type": "u16"} if six.PY2: ALPHANUM = string.digits + string.letters + ". " else: ALPHANUM = bytes(string.digits + string.ascii_letters + ". 
", encoding="ASCII") class KcdImage(FabioImage): """ Read the Nonius kcd data format """ DESCRIPTION = "KCD file format from Nonius's KappaCCD diffractometer" DEFAULT_EXTENSIONS = ["kcd"] def _readheader(self, infile): """ Read in a header in some KCD format from an already open file """ one_line = infile.readline() asciiHeader = True for oneChar in one_line.strip(): if oneChar not in ALPHANUM: asciiHeader = False if asciiHeader is False: # This does not look like an KappaCCD file logger.warning("First line of %s does not seam to be ascii text!" % infile.name) end_of_headers = False while not end_of_headers: one_line = infile.readline() try: one_line = one_line.decode("ASCII") except UnicodeDecodeError: end_of_headers = True else: if len(one_line) > 100: end_of_headers = True if not end_of_headers: if one_line.strip() == "Binned mode": one_line = "Mode = Binned" if "=" in one_line: key, val = one_line.split('=', 1) key = key.strip() self.header[key] = val.strip() else: end_of_headers = True missing = [] for item in MINIMUM_KEYS: if item not in self.header: missing.append(item) if len(missing) > 0: logger.debug("KCD file misses the keys " + " ".join(missing)) def read(self, fname, frame=None): """ Read in header into self.header and the data into self.data """ self.header = self.check_header() self.resetvals() with self._open(fname, "rb") as infile: self._readheader(infile) # Compute image size try: self.dim1 = int(self.header['X dimension']) self.dim2 = int(self.header['Y dimension']) except (KeyError, ValueError): raise IOError("KCD file %s is corrupt, cannot read it" % fname) try: bytecode = DATA_TYPES[self.header['Data type']] self.bpp = len(numpy.array(0, bytecode).tostring()) except KeyError: bytecode = numpy.uint16 self.bpp = 2 logger.warning("Defaulting type to uint16") try: nbReadOut = int(self.header['Number of readouts']) except KeyError: logger.warning("Defaulting number of ReadOut to 1") nbReadOut = 1 expected_size = self.dim1 * self.dim2 * self.bpp * 
nbReadOut try: infile.seek(-expected_size, SEEK_END) except: logger.warning("seeking from end is not implemeneted for file %s", fname) if hasattr(infile, "measure_size"): fileSize = infile.measure_size() elif hasattr(infile, "size"): fileSize = infile.size elif hasattr(infile, "getSize"): fileSize = infile.getSize() else: logger.warning("Unable to guess the file-size of %s", fname) fileSize = os.stat(fname)[6] infile.seek(fileSize - expected_size - infile.tell(), 1) block = infile.read(expected_size) # infile.close() # now read the data into the array self.data = numpy.zeros((self.dim2, self.dim1), numpy.int32) stop = 0 for i in range(nbReadOut): start = stop stop = (i + 1) * expected_size // nbReadOut data = numpy.fromstring(block[start: stop], bytecode) data.shape = self.dim2, self.dim1 if not numpy.little_endian: data.byteswap(True) self.data += data self.bytecode = self.data.dtype.type self.resetvals() # ensure the PIL image is reset self.pilimage = None return self @staticmethod def checkData(data=None): if data is None: return None else: return data.astype(int) kcdimage = KcdImage fabio-0.6.0/fabio/file_series.py0000644001611600070440000002647013227357030017646 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial 
portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # """ Authors: ........ * Henning O. Sorensen & Erik Knudsen Center for Fundamental Research: Metal Structures in Four Dimensions Risoe National Laboratory Frederiksborgvej 399 DK-4000 Roskilde email:erik.knudsen@risoe.dk * Jon Wright, ESRF """ # Get ready for python3: from __future__ import absolute_import, print_function, with_statement, division import logging import sys logger = logging.getLogger(__name__) import traceback as pytraceback from .fabioutils import FilenameObject, next_filename from .openimage import openimage def new_file_series0(first_object, first=None, last=None, step=1): """ Created from a fabio image first and last are file numbers """ im = first_object nimages = 0 # for counting images if None in (first, last): step = 0 total = 1 else: total = last - first yield im while nimages < total: nimages += step try: newim = im.next() im = newim except Exception as error: pytraceback.print_exc() # Skip bad images logger.warning("Got a problem here: %s", error) try: im.filename = next_filename(im.filename) except Exception as error: # KE: This will not work and will throw an exception # fabio.next_filename doesn't understand %nnnn on the end logger.warning("Got another problem here: %s", error) im.filename = next_filename(im.sequencefilename) yield None yield im def new_file_series(first_object, nimages=0, step=1, traceback=False): """ A generator function that creates a file series starting from a a fabioimage. 
Iterates through all images in a file (if more than 1), then proceeds to the next file as determined by fabio.next_filename. :param first_object: the starting fabioimage, which will be the first one yielded in the sequence :param nimages: the maximum number of images to consider step: step size, will yield the first and every step'th image until nimages is reached. (e.g. nimages = 5, step = 2 will yield 3 images (0, 2, 4) :param traceback: if True causes it to print a traceback in the event of an exception (missing image, etc.). Otherwise the calling routine can handle the exception as it chooses :param yields: the next fabioimage in the series. In the event there is an exception, it yields the sys.exec_info for the exception instead. sys.exec_info is a tuple: ( exceptionType, exceptionValue, exceptionTraceback ) from which all the exception information can be obtained. Suggested usage: :: for obj in new_file_series( ... ): if not isinstance(obj, fabio.fabioimage.fabioimage ): # deal with errors like missing images, non readable files, etc # e.g. traceback.print_exception(obj[0], obj[1], obj[2]) """ im = first_object nprocessed = 0 abort = False if nimages > 0: yield im nprocessed += 1 while nprocessed < nimages: try: newim = im.next() im = newim retVal = im except Exception as ex: retVal = sys.exc_info() if(traceback): pytraceback.print_exc() # Skip bad images logger.warning("Got a problem here: next() failed %s", ex) # Skip bad images try: im.filename = next_filename(im.filename) except Exception as ex: logger.warning("Got another problem here: next_filename(im.filename) %s", ex) if nprocessed % step == 0: yield retVal # Avoid cyclic references with exc_info ? 
retVal = None if abort: break nprocessed += 1 class file_series(list): """ Represents a series of files to iterate has an idea of a current position to do next and prev You also get from the list python superclass: append count extend insert pop remove reverse sort """ def __init__(self, list_of_strings): """ Constructor: :param list_of_strings: arg should be a list of strings which are filenames """ super(file_series, self).__init__(list_of_strings) # track current position in list self._current = 0 # methods which return a filename def first(self): """ First image in series """ return self[0] def last(self): """ Last in series """ return self[-1] def previous(self): """ Prev in a sequence """ self._current -= 1 return self[self._current] def current(self): """Current position in a sequence """ return self[self._current] def next(self): """ Next in a sequence """ self._current += 1 return self[self._current] def jump(self, num): """ Goto a position in sequence """ assert num < len(self) and num >= 0, "num out of range" self._current = num return self[self._current] def len(self): """ Number of files """ return len(self) # Methods which return a fabioimage def first_image(self): """ First image in a sequence :return: fabioimage """ return openimage(self.first()) def last_image(self): """ Last image in a sequence :return: fabioimage """ return openimage(self.last()) def next_image(self): """ Return the next image :return: fabioimage """ return openimage(self.next()) def previous_image(self): """ Return the previous image :return: fabioimage """ return openimage(self.previous()) def jump_image(self, num): """ Jump to and read image :return: fabioimage """ return openimage(self.jump(num)) def current_image(self): """ Current image in sequence :return: fabioimage """ return openimage(self.current()) # methods which return a file_object def first_object(self): """ First image in a sequence :return: file_object """ return FilenameObject(self.first()) def 
last_object(self): """ Last image in a sequence :return: file_object """ return FilenameObject(self.last()) def next_object(self): """ Return the next image :return: file_object """ return FilenameObject(self.next()) def previous_object(self): """ Return the previous image :return: file_object """ return FilenameObject(self.previous()) def jump_object(self, num): """ Jump to and read image :return: file_object """ return FilenameObject(self.jump(num)) def current_object(self): """ Current image in sequence :return: file_object """ return FilenameObject(self.current()) class numbered_file_series(file_series): """ mydata0001.edf = "mydata" + 0001 + ".edf" mydata0002.edf = "mydata" + 0002 + ".edf" mydata0003.edf = "mydata" + 0003 + ".edf" """ def __init__(self, stem, first, last, extension, digits=4, padding='Y', step=1): """ Constructor :param stem: first part of the name :param step: in case of every nth file :param padding: possibility for specifying that numbers are not padded with zeroes up to digits """ if padding == 'Y': fmt = "%s%0" + str(digits) + "d%s" else: fmt = "%s%i%s" super(numbered_file_series, self).__init__([fmt % (stem, i, extension) for i in range(first, last + 1, step)]) class filename_series: """ Much like the others, but created from a string filename """ def __init__(self, filename): """ create from a filename (String)""" self.obj = FilenameObject(filename) def next(self): """ increment number """ self.obj.num += 1 return self.obj.tostring() def previous(self): """ decrement number """ self.obj.num -= 1 return self.obj.tostring() def current(self): """ return current filename string""" return self.obj.tostring() def jump(self, num): """ jump to a specific number """ self.obj.num = num return self.obj.tostring() # image methods def next_image(self): """ returns the next image as a fabioimage """ return openimage(self.next()) def prev_image(self): """ returns the previos image as a fabioimage """ return openimage(self.previous()) def 
current_image(self): """ returns the current image as a fabioimage""" return openimage(self.current()) def jump_image(self, num): """ returns the image number as a fabioimage""" return openimage(self.jump(num)) # object methods def next_object(self): """ returns the next filename as a fabio.FilenameObject""" self.obj.num += 1 return self.obj def previous_object(self): """ returns the previous filename as a fabio.FilenameObject""" self.obj.num -= 1 return self.obj def current_object(self): """ returns the current filename as a fabio.FilenameObject""" return self.obj def jump_object(self, num): """ returns the filename num as a fabio.FilenameObject""" self.obj.num = num return self.obj fabio-0.6.0/fabio/converters.py0000644001611600070440000000620513227357030017541 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. """Converter module. This is for the moment empty (populated only with almost pass through anonymous functions) but aims to be populated with more sofisticated translators... """ # get ready for python3 from __future__ import with_statement, print_function __author__ = "Jérôme Kieffer" __contact__ = "jerome.kieffer@esrf.eu" __license__ = "MIT" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" import logging logger = logging.getLogger(__name__) def convert_data_integer(data): """ convert data to integer """ if data is not None: return data.astype(int) else: return data CONVERSION_HEADER = { ("edfimage", "edfimage"): lambda header: header, } CONVERSION_DATA = { ("edfimage", "edfimage"): lambda data: data, ("edfimage", "cbfimage"): convert_data_integer, ("edfimage", "mar345image"): convert_data_integer, ("edfimage", "fit2dmaskimage"): convert_data_integer, ("edfimage", "kcdimage"): convert_data_integer, ("edfimage", "OXDimage"): convert_data_integer, ("edfimage", "pnmimage"): convert_data_integer, } def convert_data(inp, outp, data): """ Return data converted to the output format ... over-simplistic implementation for the moment... 
:param str inp: input format (like "cbfimage") :param str outp: output format (like "cbfimage") :param numpy.ndarray data: the actual dataset to be transformed """ return CONVERSION_DATA.get((inp, outp), lambda data: data)(data) def convert_header(inp, outp, header): """ Return header converted to the output format :param str inp: input format (like "cbfimage") :param str outp: output format (like "cbfimage") :param dict header: the actual set of headers to be transformed """ return CONVERSION_HEADER.get((inp, outp), lambda header: header)(header) fabio-0.6.0/fabio/OXDimage.py0000644001611600070440000006660213227357030017013 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE """Reads Oxford Diffraction Sapphire 3 images Authors: ........ * Henning O. Sorensen & Erik Knudsen: Center for Fundamental Research: Metal Structures in Four Dimensions; Risoe National Laboratory; Frederiksborgvej 399; DK-4000 Roskilde; email:erik.knudsen@risoe.dk * Jon Wright, Jérôme Kieffer & Gaël Goret: European Synchrotron Radiation Facility; Grenoble (France) """ # Get ready for python3: from __future__ import with_statement, print_function __contact__ = "Jerome.Kieffer@esrf.fr" __license__ = "MIT" __copyright__ = "Jérôme Kieffer" __date__ = "27/07/2017" import time import logging import struct logger = logging.getLogger(__name__) import numpy from .fabioimage import FabioImage from .compression import decTY1, compTY1 from .fabioutils import to_str from .utils.mathutils import rad2deg, deg2rad DETECTOR_TYPES = {0: 'Sapphire/KM4CCD (1x1: 0.06mm, 2x2: 0.12mm)', 1: 'Sapphire2-Kodak (1x1: 0.06mm, 2x2: 0.12mm)', 2: 'Sapphire3-Kodak (1x1: 0.03mm, 2x2: 0.06mm, 4x4: 0.12mm)', 3: 'Onyx-Kodak (1x1: 0.06mm, 2x2: 0.12mm, 4x4: 0.24mm)', 4: 'Unknown Oxford diffraction detector', 7: 'Pilatus 300K-Dectris'} DEFAULT_HEADERS = {'Header Version': 'OD SAPPHIRE 3.0', 'Compression': "TY1", 'Header Size In Bytes': 5120, "ASCII Section size in Byte": 256, "General Section size in Byte": 512, "Special Section size in Byte": 768, "KM4 Section size in Byte": 1024, "Statistic Section in Byte": 512, "History Section in Byte": 2048, 'NSUPPLEMENT': 0 } class OxdImage(FabioImage): """ Oxford Diffraction Sapphire 3 images reader/writer class Note: We assume the binary format is alway little-endian, is this True ? 
""" DESCRIPTION = "Oxford Diffraction Sapphire 3 file format" DEFAULT_EXTENSIONS = ["img"] def _readheader(self, infile): infile.seek(0) # Ascii header part 256 byes long self.header['Header Version'] = to_str(infile.readline()[:-2]) block = infile.readline() self.header['Compression'] = to_str(block[12:15]) block = infile.readline() self.header['NX'] = int(block[3:7]) self.header['NY'] = int(block[11:15]) self.header['OI'] = int(block[19:26]) self.header['OL'] = int(block[30:37]) block = infile.readline() self.header['Header Size In Bytes'] = int(block[8:15]) self.header['General Section size in Byte'] = int(block[19:26]) self.header['Special Section size in Byte'] = int(block[30:37]) self.header['KM4 Section size in Byte'] = int(block[41:48]) self.header['Statistic Section in Byte'] = int(block[52:59]) self.header['History Section in Byte'] = int(block[63:]) block = infile.readline() self.header['NSUPPLEMENT'] = int(block[12:19]) block = infile.readline() self.header['Time'] = to_str(block[5:29]) header_version = float(self.header['Header Version'].split()[2]) if header_version < 4.0: # for all our test files with header version 3.0 # ascii_section_size == 256 # but that's a legacy code ascii_section_size = self.header['Header Size In Bytes'] - ( self.header['General Section size in Byte'] + self.header['Special Section size in Byte'] + self.header['KM4 Section size in Byte'] + self.header['Statistic Section in Byte'] + self.header['History Section in Byte']) else: ascii_section_size = DEFAULT_HEADERS["ASCII Section size in Byte"] self.header["ASCII Section size in Byte"] = ascii_section_size # Skip to general section (NG) 512 byes long <<<<<<" infile.seek(self.header["ASCII Section size in Byte"]) block = infile.read(self.header['General Section size in Byte']) self.header['Binning in x'] = struct.unpack(" 0: raw16 = infile.read(self.header['OI'] * 2) if self.header['OL'] > 0: raw32 = infile.read(self.header['OL'] * 4) # endianess is handled at the decompression 
level raw_data = decTY1(raw8, raw16, raw32) bytecode = raw_data.dtype elif self.header['Compression'] == 'TY5': logger.info("Compressed with the TY5 compression") bytecode = numpy.int8 self.bpp = 1 raw8 = infile.read(self.dim1 * self.dim2) raw_data = numpy.fromstring(raw8, bytecode) if self.header['OI'] > 0: self.raw16 = infile.read(self.header['OI'] * 2) else: self.raw16 = b"" if self.header['OL'] > 0: self.raw32 = infile.read(self.header['OL'] * 4) else: self.raw32 = b"" self.rest = infile.read() self.blob = raw8 + self.raw16 + self.raw32 + self.rest raw_data = self.dec_TY5(raw8 + self.raw16 + self.raw32) else: bytecode = numpy.int32 self.bpp = len(numpy.array(0, bytecode).tostring()) nbytes = self.dim1 * self.dim2 * self.bpp raw_data = numpy.fromstring(infile.read(nbytes), bytecode) # Always assume little-endian on the disk if not numpy.little_endian: raw_data.byteswap(True) # infile.close() logger.debug('OVER_SHORT2: %s', raw_data.dtype) logger.debug("%s" % (raw_data < 0).sum()) logger.debug("BYTECODE: %s", bytecode) self.data = raw_data.reshape((self.dim2, self.dim1)) self.bytecode = self.data.dtype.type self.pilimage = None return self def _writeheader(self): """ :return: a string containing the header for Oxford images """ linesep = "\r\n" for key in DEFAULT_HEADERS: if key not in self.header: self.header[key] = DEFAULT_HEADERS[key] if "NX" not in self.header.keys() or "NY" not in self.header.keys(): self.header['NX'] = self.dim1 self.header['NY'] = self.dim2 ascii_headers = [self.header['Header Version'], "COMPRESSION=%s (%5.1f)" % (self.header["Compression"], self.getCompressionRatio()), "NX=%4i NY=%4i OI=%7i OL=%7i " % (self.header["NX"], self.header["NY"], self.header["OI"], self.header["OL"]), "NHEADER=%7i NG=%7i NS=%7i NK=%7i NS=%7i NH=%7i" % (self.header['Header Size In Bytes'], self.header['General Section size in Byte'], self.header['Special Section size in Byte'], self.header['KM4 Section size in Byte'], self.header['Statistic Section in Byte'], 
self.header['History Section in Byte']), "NSUPPLEMENT=%7i" % (self.header["NSUPPLEMENT"])] if "Time" in self.header: ascii_headers.append("TIME=%s" % self.header["Time"]) else: ascii_headers.append("TIME=%s" % time.ctime()) header = (linesep.join(ascii_headers)).ljust(256).encode("ASCII") NG = Section(self.header['General Section size in Byte'], self.header) NG.setData('Binning in x', 0, numpy.uint16) NG.setData('Binning in y', 2, numpy.uint16) NG.setData('Detector size x', 22, numpy.uint16) NG.setData('Detector size y', 24, numpy.uint16) NG.setData('Pixels in x', 26, numpy.uint16) NG.setData('Pixels in y', 28, numpy.uint16) NG.setData('No of pixels', 36, numpy.uint32) header += NG.__repr__() NS = Section(self.header['Special Section size in Byte'], self.header) NS.setData('Gain', 56, numpy.float) NS.setData('Overflows flag', 464, numpy.int16) NS.setData('Overflow after remeasure flag', 466, numpy.int16) NS.setData('Overflow threshold', 472, numpy.int32) NS.setData('Exposure time in sec', 480, numpy.float) NS.setData('Overflow time in sec', 488, numpy.float) NS.setData('Monitor counts of raw image 1', 528, numpy.int32) NS.setData('Monitor counts of raw image 2', 532, numpy.int32) NS.setData('Monitor counts of overflow raw image 1', 536, numpy.int32) NS.setData('Monitor counts of overflow raw image 2', 540, numpy.int32) NS.setData('Unwarping', 544, numpy.int32) if 'Detector type' in self.header: for key, value in DETECTOR_TYPES.items(): if value == self.header['Detector type']: NS.setData(None, 548, numpy.int32, default=key) NS.setData('Real pixel size x (mm)', 568, numpy.float) NS.setData('Real pixel size y (mm)', 576, numpy.float) header += NS.__repr__() KM = Section(self.header['KM4 Section size in Byte'], self.header) KM.setData('Spatial correction file date', 0, "|S26") KM.setData('Spatial correction file', 26, "|S246") # Angles are in steps due to stepper motors - conversion factor RAD # angle[0] = omega, angle[1] = theta, angle[2] = kappa, angle[3] = phi, if 
self.header.get('Omega step in deg', None): KM.setData(None, 368, numpy.float64, deg2rad(self.header["Omega step in deg"])) if self.header.get('Omega start in deg', None): KM.setData(None, 284, numpy.int32, self.header["Omega start in deg"] / self.header["Omega step in deg"]) if self.header.get('Omega end in deg', None): KM.setData(None, 324, numpy.int32, self.header["Omega end in deg"] / self.header["Omega step in deg"]) if self.header.get('Omega zero corr. in deg', None): KM.setData(None, 512, numpy.int32, self.header['Omega zero corr. in deg'] / self.header["Omega step in deg"]) if self.header.get('Theta step in deg', None): KM.setData(None, 368 + 8, numpy.float64, deg2rad(self.header["Theta step in deg"])) if self.header.get('Theta start in deg', None): KM.setData(None, 284 + 4, numpy.int32, self.header["Theta start in deg"] / self.header["Theta step in deg"]) if self.header.get('Theta end in deg', None): KM.setData(None, 324 + 4, numpy.int32, self.header["Theta end in deg"] / self.header["Theta step in deg"]) if self.header.get('Theta zero corr. in deg', None): KM.setData(None, 512 + 4, numpy.int32, self.header['Theta zero corr. in deg'] / self.header["Theta step in deg"]) if self.header.get('Kappa step in deg', None): KM.setData(None, 368 + 16, numpy.float64, deg2rad(self.header["Kappa step in deg"])) if self.header.get('Kappa start in deg', None): KM.setData(None, 284 + 8, numpy.int32, self.header["Kappa start in deg"] / self.header["Kappa step in deg"]) if self.header.get('Kappa end in deg', None): KM.setData(None, 324 + 8, numpy.int32, self.header["Kappa end in deg"] / self.header["Kappa step in deg"]) if self.header.get('Kappa zero corr. in deg', None): KM.setData(None, 512 + 8, numpy.int32, self.header['Kappa zero corr. 
in deg'] / self.header["Kappa step in deg"]) if self.header.get('Phi step in deg', None): KM.setData(None, 368 + 24, numpy.float64, deg2rad(self.header["Phi step in deg"])) if self.header.get('Phi start in deg', None): KM.setData(None, 284 + 12, numpy.int32, self.header["Phi start in deg"] / self.header["Phi step in deg"]) if self.header.get('Phi end in deg', None): KM.setData(None, 324 + 12, numpy.int32, self.header["Phi end in deg"] / self.header["Phi step in deg"]) if self.header.get('Phi zero corr. in deg', None): KM.setData(None, 512 + 12, numpy.int32, self.header['Phi zero corr. in deg'] / self.header["Phi step in deg"]) # Beam rotation about e2,e3 KM.setData('Beam rot in deg (e2)', 552, numpy.float64) KM.setData('Beam rot in deg (e3)', 560, numpy.float64) # Wavelenghts alpha1, alpha2, beta KM.setData('Wavelength alpha1', 568, numpy.float64) KM.setData('Wavelength alpha2', 576, numpy.float64) KM.setData('Wavelength alpha', 584, numpy.float64) KM.setData('Wavelength beta', 592, numpy.float64) # Detector tilts around e1,e2,e3 in deg KM.setData('Detector tilt e1 in deg', 640, numpy.float64) KM.setData('Detector tilt e2 in deg', 648, numpy.float64) KM.setData('Detector tilt e3 in deg', 656, numpy.float64) # Beam center KM.setData('Beam center x', 664, numpy.float64) KM.setData('Beam center y', 672, numpy.float64) # Angle (alpha) between kappa rotation axis and e3 (ideally 50 deg) KM.setData('Alpha angle in deg', 680, numpy.float64) # Angle (beta) between phi rotation axis and e3 (ideally 0 deg) KM.setData('Beta angle in deg', 688, numpy.float64) # Detector distance KM.setData('Distance in mm', 712, numpy.float64) header += KM.__repr__() SS = Section(self.header['Statistic Section in Byte'], self.header) SS.setData('Stat: Min ', 0, numpy.int32) SS.setData('Stat: Max ', 4, numpy.int32) SS.setData('Stat: Average ', 24, numpy.float64) if self.header.get('Stat: Stddev ', None): SS.setData(None, 32, numpy.float64, self.header['Stat: Stddev '] ** 2) SS.setData('Stat: 
# NOTE(review): OxdImage methods; the enclosing `class` statement lies outside
# this chunk, so they are reproduced at the flat indentation of the source.
logger = logging.getLogger(__name__)


def write(self, fname):
    """Write Oxford diffraction images: this is still beta.

    Only TY1 compressed images are currently possible.

    :param fname: output filename
    """
    if self.header.get("Compression") != "TY1":
        logger.warning("Enforce TY1 compression")
        self.header["Compression"] = "TY1"
    datablock8, datablock16, datablock32 = compTY1(self.data)
    # BUG FIX: these are element counts stored in the binary header -- true
    # division yields a float under Python 3; use floor division.
    self.header["OI"] = len(datablock16) // 2
    self.header["OL"] = len(datablock32) // 4
    with self._open(fname, mode="wb") as outfile:
        outfile.write(self._writeheader())
        outfile.write(datablock8)
        outfile.write(datablock16)
        outfile.write(datablock32)


def getCompressionRatio(self):
    "calculate the compression factor obtained vs raw data"
    # raw data are 4 bytes/pixel; overflow tables add 2 (OI) and 4 (OL) bytes
    return 100.0 * (self.data.size + 2 * self.header["OI"] + 4 * self.header["OL"]) / (self.data.size * 4)


@staticmethod
def checkData(data=None):
    # Oxford images store (signed) integers; normalise any input array.
    if data is None:
        return None
    return data.astype(int)


def dec_TY5(self, stream):
    """Attempt to decode the TY5 compression scheme (delta coded rows).

    :param stream: input byte stream
    :return: 1D float64 numpy array with the decoded data
    """
    logger.info("TY5 decompression is slow for now")
    array_size = self.dim1 * self.dim2
    stream_size = len(stream)
    data = numpy.zeros(array_size)
    # numpy.fromstring is deprecated (removed in recent numpy): frombuffer
    raw = numpy.frombuffer(stream, dtype="uint8")
    pos_inp = pos_out = current = ex1 = ex2 = 0
    while pos_inp < stream_size and pos_out < array_size:
        if pos_out % self.dim2 == 0:
            # first pixel of a row: delta is relative to 0
            last = 0
        else:
            last = current
        value = raw[pos_inp]
        if value < 254:
            # normal case: one byte encodes one pixel
            current = last + value - 127
            pos_inp += 1
        elif value == 254:
            # special case 1: marker 254 -> next 2 bytes encode one pixel.
            # Explicit little-endian view is correct on any host byte order
            # and avoids in-place byteswap of a read-only frombuffer view.
            ex1 += 1
            current = last + int(raw[pos_inp + 1:pos_inp + 3].view("<i2")[0])
            pos_inp += 3
        else:  # value == 255
            # special case 2: marker 255 -> next 4 bytes encode one pixel
            ex2 += 1
            logger.info('special case 32 bits.')
            current = last + int(raw[pos_inp + 1:pos_inp + 5].view("<i4")[0])
            pos_inp += 5
        data[pos_out] = current
        pos_out += 1
    logger.info("TY5: Exception: 16bits: %s, 32bits: %s", ex1, ex2)
    return data
class Section(object):
    """Small helper class for writing binary header sections.

    The section is a fixed-size ``bytearray`` into which typed values are
    serialised little-endian (the Oxford on-disk convention) at fixed offsets.
    """

    def __init__(self, size, dictHeader):
        """
        :param size: size of the header section in bytes
        :param dictHeader: headers of the image
        """
        self.size = size
        self.header = dictHeader
        self.lstChr = bytearray(size)
        # cache of dtype -> element size in bytes
        self._dictSize = {}

    def __repr__(self):
        # NOTE(review): deliberately returns the raw *bytes* of the section
        # (the writer concatenates sections with ``+``), not a display string.
        return bytes(self.lstChr)

    def getSize(self, dtype):
        """Return (and cache) the size in bytes of one element of *dtype*."""
        if dtype not in self._dictSize:
            # numpy.dtype(...).itemsize avoids allocating a throw-away array
            self._dictSize[dtype] = numpy.dtype(dtype).itemsize
        return self._dictSize[dtype]

    def setData(self, key, offset, dtype, default=None):
        """Serialise one header value into the section buffer.

        :param key: name of the header key (looked up in the image header,
            then in DEFAULT_HEADERS); may be None to use *default* directly
        :param offset: int, starting position in the section
        :param dtype: numpy type of the data to insert (defines the size!)
        :param default: fallback value when the key is absent everywhere
        """
        if key in self.header:
            value = self.header[key]
        elif key in DEFAULT_HEADERS:
            value = DEFAULT_HEADERS[key]
        else:
            value = default
        nbytes = self.getSize(dtype)
        if value is None:
            # absent value: leave the slot zero-filled
            raw = b"\x00" * nbytes
        elif numpy.little_endian:
            # tostring() was removed in numpy >= 1.23: use tobytes()
            raw = numpy.array(value).astype(dtype).tobytes()
        else:
            raw = numpy.array(value).astype(dtype).byteswap().tobytes()
        self.lstChr[offset:offset + nbytes] = raw
""" if key in self.header: value = self.header[key] elif key in DEFAULT_HEADERS: value = DEFAULT_HEADERS[key] else: value = default if value is None: value = b"\x00" * self.getSize(dtype) elif numpy.little_endian: value = numpy.array(value).astype(dtype).tostring() else: value = numpy.array(value).astype(dtype).byteswap().tostring() self.lstChr[offset:offset + self.getSize(dtype)] = value fabio-0.6.0/fabio/xsdimage.py0000644001611600070440000001273513227357030017155 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: FabIO X-ray image reader # # Copyright (C) 2010-2016 European Synchrotron Radiation Facility # Grenoble, France # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
# """ Authors: Jérôme Kieffer, ESRF email:jerome.kieffer@esrf.fr XSDimge are XML files containing numpy arrays """ # Get ready for python3: from __future__ import absolute_import, print_function, with_statement, division __author__ = "Jérôme Kieffer" __contact__ = "jerome.kieffer@esrf.eu" __license__ = "MIT" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" import logging import numpy import base64 import hashlib from .fabioimage import FabioImage from .fabioutils import six logger = logging.getLogger(__name__) try: import lxml.etree as etree except ImportError: try: # Try using the standard library import xml.etree.ElementTree as etree except ImportError: logger.warning("xml/lxml library is probably not part of your python installation: disabling xsdimage format") etree = None class XsdImage(FabioImage): """ Read the XSDataImage XML File data format """ DESCRIPTION = "XSDataImage XML file format" DEFAULT_EXTENSIONS = ["xml", "xsd"] def __init__(self, data=None, header=None, fname=None): """ Constructor of the class XSDataImage. 
# NOTE(review): XsdImage methods; the enclosing `class` statement lies outside
# this chunk, so they are reproduced at the flat indentation of the source.
def read(self, fname, frame=None):
    """Read an XSDataImage file: parse the XML headers, then decode the
    base64/base32/base16 payload into ``self.data`` (md5-checked if present).

    :param str fname: name of the file to read
    :param frame: unused (single-frame format), kept for API compatibility
    :return: self
    :raises IOError: when no dimensions are found in the file
    """
    self.header = {}
    self.resetvals()
    self.filename = fname
    with self._open(fname, "rb") as infile:
        self._readheader(infile)
    try:
        self.dim1, self.dim2 = self.dims[:2]
    except ValueError:
        raise IOError("XSD file %s is corrupt, no dimensions in it" % fname)
    # the declared size must match the product of the dimensions
    exp_size = 1
    for i in self.dims:
        exp_size *= i
    assert exp_size == self.size
    if self.coding == "base64":
        decData = base64.b64decode(self.rawData)
    elif self.coding == "base32":
        decData = base64.b32decode(self.rawData)
    elif self.coding == "base16":
        decData = base64.b16decode(self.rawData)
    else:
        logger.warning("Unable to recognize the encoding of the data !!! got %s, expected base64, base32 or base16, I assume it is base64 " % self.coding)
        decData = base64.b64decode(self.rawData)
    if self.md5:
        assert hashlib.md5(decData).hexdigest() == self.md5
    # numpy.fromstring is deprecated/removed: frombuffer + copy() keeps the
    # array writable (byteswap(True) below mutates in place)
    self.data = numpy.frombuffer(decData, dtype=self.dtype).reshape(tuple(self.dims)).copy()
    if not numpy.little_endian:
        # file payload is little-endian by default
        self.data.byteswap(True)
    self.resetvals()
    # ensure the PIL image is reset
    self.pilimage = None
    return self


def _readheader(self, infile):
    """Read all headers in a file and populate self.header
    (data is not yet populated).

    :type infile: file object open in read mode
    """
    xml = etree.parse(infile)
    self.dims = []
    for i in xml.findall(".//shape"):
        try:
            self.dims.append(int(i.text))
        except ValueError as error:
            logger.warning("%s Shape: Unable to convert %s to integer in %s" % (error, i.text, i))
    for i in xml.findall(".//size"):
        try:
            self.size = int(i.text)
        except Exception as error:
            logger.warning("%s Size: Unable to convert %s to integer in %s" % (error, i.text, i))
    self.dtype = None
    for i in xml.findall(".//dtype"):
        self.dtype = i.text
    self.coding = None
    for i in xml.findall(".//coding"):
        j = i.find("value")
        if j is not None:
            self.coding = j.text
    self.rawData = None
    for i in xml.findall(".//data"):
        # six.b: raw payload is kept as bytes for the base64 decoders
        self.rawData = six.b(i.text)
    self.md5 = None
    for i in xml.findall(".//md5sum"):
        j = i.find("value")
        if j is not None:
            self.md5 = j.text
i.text self.coding = None for i in xml.findall(".//coding"): j = i.find("value") if j is not None: self.coding = j.text self.rawData = None for i in xml.findall(".//data"): self.rawData = six.b(i.text) self.md5 = None for i in xml.findall(".//md5sum"): j = i.find("value") if j is not None: self.md5 = j.text if etree is None: # Hide the class if it can't work XsdImage = None xsdimage = XsdImage fabio-0.6.0/fabio/readbytestream.py0000644001611600070440000000746313227357030020371 0ustar kiefferscisoft00000000000000# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE """Reads a bytestream Authors: Jon Wright Henning O. 
logger = logging.getLogger(__name__)

DATATYPES = {
    # (type, signed, bytes-per-pixel) -> numpy dtype
    ("int", 'n', 1): numpy.uint8,
    ("int", 'n', 2): numpy.uint16,
    ("int", 'n', 4): numpy.uint32,
    ("int", 'y', 1): numpy.int8,
    ("int", 'y', 2): numpy.int16,
    ("int", 'y', 4): numpy.int32,
    ('float', 'y', 4): numpy.float32,  # does this occur in bruker?
    ('double', 'y', 4): numpy.float64
}


def readbytestream(fil, offset, x, y, nbytespp, datatype='int', signed='n', swap='n', typeout=numpy.uint16):
    """
    Reads in a bytestream from a file (which may be a string indicating
    a filename, or an already opened file (should be "rb"))
    offset is the position (in bytes) where the pixel data start
    nbytespp = number of bytes per pixel
    type can be int or float (4 bytes pp) or double (8 bytes pp)
    signed: normally signed data 'y', but 'n' to try to get back the
    right numbers when unsigned data are converted to signed
    (python once had no unsigned numeric types.)
    swap, normally do not bother, but 'y' to swap bytes
    typeout is the numpy type to output, normally uint16,
    but more if overflows occurred
    x and y are the pixel dimensions

    TODO : Read in regions of interest

    PLEASE LEAVE THE STRANGE INTERFACE ALONE -
    IT IS USEFUL FOR THE BRUKER FORMAT
    """
    length = nbytespp * x * y  # bytes per pixel times number of pixels
    if datatype in ['float', 'double']:
        signed = 'y'
    key = (datatype, signed, nbytespp)
    try:
        tin = DATATYPES[key]
    except KeyError:
        logger.warning("datatype, signed, nbytespp: %s", str(key))
        raise Exception("Unknown combination of types to readbytestream")
    # Did we get a string (filename) or a readable stream object?
    if hasattr(fil, "read") and hasattr(fil, "seek"):
        infile = fil
        opened = False
    else:
        infile = open(fil, 'rb')
        opened = True
    try:
        infile.seek(offset)
        # numpy.fromstring is deprecated/removed: frombuffer (the numpy.array
        # call below makes the writable copy)
        data = numpy.frombuffer(infile.read(length), tin)
        arr = numpy.array(numpy.reshape(data, (x, y)), typeout)
    finally:
        # BUG FIX: close the file we opened even when seek/read/reshape
        # raises (the original leaked the handle on error)
        if opened:
            infile.close()
    if swap == 'y':
        arr = arr.byteswap()
    return arr
if hasattr(fil, "read") and hasattr(fil, "seek"): infile = fil opened = False else: infile = open(fil, 'rb') opened = True infile.seek(offset) data = numpy.fromstring(infile.read(length), tin) arr = numpy.array(numpy.reshape(data, (x, y)), typeout) if swap == 'y': arr = arr.byteswap() if opened: infile.close() return arr fabio-0.6.0/build-deb.sh0000755001611600070440000001674013227357030016110 0ustar kiefferscisoft00000000000000#!/bin/sh # # Project: Fabio Input/Output # https://github.com/silx-kit/fabio # # Copyright (C) 2015-2017 European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE # Script that builds a debian package from this library project=fabio source_project=python-fabio version=$(python -c"import version; print(version.version)") strictversion=$(python -c"import version; print(version.strictversion)") debianversion=$(python -c"import version; print(version.debianversion)") deb_name=$(echo "$source_project" | tr '[:upper:]' '[:lower:]') # target system if [ -f /etc/debian_version ] then debian_version=$(cat /etc/debian_version | cut -d. -f1 | grep -o '[0-9]*') if [ -z $debian_version ] then #we are probably on a ubuntu platform debian_version=$(cat /etc/debian_version | cut -d/ -f1) case $debian_version in squeeze) debian_version=6 ;; wheezy) debian_version=7 ;; jessie) debian_version=8 ;; stretch) debian_version=9 ;; esac fi else debian_version=0 fi target_system=debian${debian_version} project_directory="`dirname \"$0\"`" project_directory="`( cd \"$project_directory\" && pwd )`" # absolutized dist_directory=${project_directory}/dist/${target_system} build_directory=${project_directory}/build/${target_system} if [ -d /usr/lib/ccache ]; then export PATH=/usr/lib/ccache:$PATH fi usage="usage: $(basename "$0") [options] Build the Debian ${debian_version} package of the ${project} library. If the build succeed the directory dist/debian${debian_version} will contains the packages. 
optional arguments: --help show this help text --install install the packages generated at the end of the process using 'sudo dpkg' --debian7 Simulate a debian7 system (fail-safe) --debian8 Simulate a debian 8 Jessie system --debian9 Simulate a debian 9 Stretch system " install=0 use_python3=0 #used only for stdeb while : do case "$1" in -h | --help) echo "$usage" exit 0 ;; --install) install=1 shift ;; --python3) use_python3=1 shift ;; --debian7) debian_version=7 target_system=debian${debian_version} dist_directory=${project_directory}/dist/${target_system} build_directory=${project_directory}/build/${target_system} shift ;; --debian8) debian_version=8 target_system=debian${debian_version} dist_directory=${project_directory}/dist/${target_system} build_directory=${project_directory}/build/${target_system} shift ;; --debian9) debian_version=9 target_system=debian${debian_version} dist_directory=${project_directory}/dist/${target_system} build_directory=${project_directory}/build/${target_system} shift ;; -*) echo "Error: Unknown option: $1" >&2 echo "$usage" exit 1 ;; *) # No more options break ;; esac done clean_up() { echo "Clean working dir:" # clean up previous build rm -rf ${build_directory} # create the build context mkdir -p ${build_directory} } build_deb_8_plus () { echo "Build for debian 8 or newer using actual packaging" tarname=${project}_${debianversion}.orig.tar.gz clean_up python setup.py debian_src cp -f dist/${tarname} ${build_directory} if [ -f dist/${project}-testimages.tar.gz ] then cp -f dist/${project}-testimages.tar.gz ${build_directory} fi cd ${build_directory} tar -xzf ${tarname} directory=${project}-${strictversion} newname=${deb_name}_${debianversion}.orig.tar.gz #echo tarname $tarname newname $newname if [ $tarname != $newname ] then if [ -h $newname ] then rm ${newname} fi ln -s ${tarname} ${newname} fi if [ -f ${project}-testimages.tar.gz ] then if [ ! 
-h ${deb_name}_${debianversion}.orig-testimages.tar.gz ] then ln -s ${project}-testimages.tar.gz ${deb_name}_${debianversion}.orig-testimages.tar.gz fi fi cd ${directory} cp -r ${project_directory}/package/${target_system} debian cp ${project_directory}/copyright debian #handle test images if [ -f ../${deb_name}_${debianversion}.orig-testimages.tar.gz ] then if [ ! -d testimages ] then mkdir testimages fi cd testimages tar -xzf ../${deb_name}_${debianversion}.orig-testimages.tar.gz cd .. else # Disable to skip tests during build echo No test data #export PYBUILD_DISABLE_python2=test #export PYBUILD_DISABLE_python3=test #export DEB_BUILD_OPTIONS=nocheck fi dch -v ${debianversion}-1 "upstream development build of ${project} ${version}" dch --bpo "${project} snapshot ${version} built for ${target_system}" dpkg-buildpackage -r rc=$? if [ $rc -eq 0 ]; then # move packages to dist directory echo Build succeeded... rm -rf ${dist_directory} mkdir -p ${dist_directory} mv ${build_directory}/*.deb ${dist_directory} mv ${build_directory}/*.x* ${dist_directory} mv ${build_directory}/*.dsc ${dist_directory} mv ${build_directory}/*.changes ${dist_directory} cd ../../.. else echo Build failed, please investigate ... exit "$rc" fi } build_deb_7_minus () { echo "Build for debian 7 or older using stdeb" tarname=${project}-${strictversion}.tar.gz clean_up python setup.py sdist cp -f dist/${tarname} ${build_directory} cd ${build_directory} tar -xzf ${tarname} cd ${project}-${strictversion} if [ $use_python3 = 1 ] then echo Using Python 2+3 python3 setup.py --command-packages=stdeb.command sdist_dsc --with-python2=True --with-python3=True --no-python3-scripts=True build --no-cython bdist_deb rc=$? else echo Using Python 2 # bdist_deb feed /usr/bin using setup.py entry-points python setup.py --command-packages=stdeb.command build --no-cython bdist_deb rc=$? 
fi # move packages to dist directory rm -rf ${dist_directory} mkdir -p ${dist_directory} mv -f deb_dist/*.deb ${dist_directory} # back to the root cd ../../.. } if [ $debian_version -ge 8 ] then build_deb_8_plus else build_deb_7_minus fi if [ $install -eq 1 ]; then sudo -v su -c "dpkg -i ${dist_directory}/*.deb" fi exit "$rc" fabio-0.6.0/requirements.txt0000644001611600070440000000022713227357030017177 0ustar kiefferscisoft00000000000000#List of dependecies used by PIP but also by ReadTheDocs to generate the documentation on the fly numpy cython lxml sphinx sphinxcontrib-programoutput fabio-0.6.0/bootstrap.py0000755001611600070440000001662513227357030016316 0ustar kiefferscisoft00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- """ Bootstrap helps you to test scripts without installing them by patching your PYTHONPATH on the fly example: ./bootstrap.py ipython """ __authors__ = ["Frédéric-Emmanuel Picca", "Jérôme Kieffer"] __contact__ = "jerome.kieffer@esrf.eu" __license__ = "MIT" __date__ = "31/07/2017" import sys import os import distutils.util import subprocess import logging logging.basicConfig() logger = logging.getLogger("bootstrap") def is_debug_python(): """Returns true if the Python interpreter is in debug mode.""" try: import sysconfig except ImportError: # pragma nocover # Python < 2.7 import distutils.sysconfig as sysconfig if sysconfig.get_config_var("Py_DEBUG"): return True return hasattr(sys, "gettotalrefcount") def _distutils_dir_name(dname="lib"): """ Returns the name of a distutils build directory """ platform = distutils.util.get_platform() architecture = "%s.%s-%i.%i" % (dname, platform, sys.version_info[0], sys.version_info[1]) if is_debug_python(): architecture += "-pydebug" return architecture def _distutils_scripts_name(): """Return the name of the distrutils scripts sirectory""" f = "scripts-{version[0]}.{version[1]}" return f.format(version=sys.version_info) def _get_available_scripts(path): res = [] try: res = " 
".join([s.rstrip('.py') for s in os.listdir(path)]) except OSError: res = ["no script available, did you ran " "'python setup.py build' before bootstrapping ?"] return res if sys.version_info[0] >= 3: # Python3 def execfile(fullpath, globals=None, locals=None): "Python3 implementation for execfile" with open(fullpath) as f: try: data = f.read() except UnicodeDecodeError: raise SyntaxError("Not a Python script") code = compile(data, fullpath, 'exec') exec(code, globals, locals) def run_file(filename, argv): """ Execute a script trying first to use execfile, then a subprocess :param str filename: Script to execute :param list[str] argv: Arguments passed to the filename """ full_args = [filename] full_args.extend(argv) try: logger.info("Execute target using exec") # execfile is considered as a local call. # Providing globals() as locals will force to feed the file into # globals() (for examples imports). # Without this any function call from the executed file loses imports try: old_argv = sys.argv sys.argv = full_args logger.info("Patch the sys.argv: %s", sys.argv) logger.info("Executing %s.main()", filename) print("########### EXECFILE ###########") module_globals = globals().copy() module_globals['__file__'] = filename execfile(filename, module_globals, module_globals) finally: sys.argv = old_argv except SyntaxError as error: logger.error(error) logger.info("Execute target using subprocess") env = os.environ.copy() env.update({"PYTHONPATH": LIBPATH + os.pathsep + os.environ.get("PYTHONPATH", ""), "PATH": os.environ.get("PATH", "")}) print("########### SUBPROCESS ###########") run = subprocess.Popen(full_args, shell=False, env=env) run.wait() def run_entry_point(entry_point, argv): """ Execute an entry_point using the current python context (http://setuptools.readthedocs.io/en/latest/setuptools.html#automatic-script-creation) :param str entry_point: A string identifying a function from a module (NAME = PACKAGE.MODULE:FUNCTION) """ import importlib elements = 
def find_executable(target):
    """Find a filename from a script name.

    - Check the script name as file path,
    - Then checks if the name is a target of the setup.py
    - Then search the script from the PATH environment variable.

    :param str target: Name of the script
    :returns: Returns a tuple: kind, name -- kind is "path" or
        "entry_point", or (None, None) when nothing matches.
    """
    if os.path.isfile(target):
        return ("path", os.path.abspath(target))

    # search the file from setup.py
    import setup
    config = setup.get_project_configuration(dry_run=True)
    # scripts from project configuration
    if "scripts" in config:
        for script_name in config["scripts"]:
            # BUG FIX: the original compared the undefined name `script`
            # (NameError at runtime); the loop variable is `script_name`.
            if os.path.basename(script_name) == target:
                return ("path", os.path.abspath(script_name))
    # entry-points from project configuration
    if "entry_points" in config:
        for kind in config["entry_points"]:
            for entry_point in config["entry_points"][kind]:
                name = entry_point.split("=")[0].strip()
                if name == target:
                    return ("entry_point", entry_point)
    # search the file from env PATH
    for dirname in os.environ.get("PATH", "").split(os.pathsep):
        path = os.path.join(dirname, target)
        if os.path.isfile(path):
            return ("path", path)
    return None, None
cwd=os.path.dirname(os.path.abspath(__file__))) build_rc = build.wait() os.chdir(cwd) if build_rc == 0: logger.info("Build process ended.") else: logger.error("Build process ended with rc=%s", build_rc) sys.exit(-1) if __name__ == "__main__": if len(sys.argv) < 2: logger.warning("usage: ./bootstrap.py