pax_global_header 0000666 0000000 0000000 00000000064 14456506420 0014520 g ustar 00root root 0000000 0000000 52 comment=3b958af172dc1f85a3660c06f352eecfc4d68d4c
igor2-0.5.3/ 0000775 0000000 0000000 00000000000 14456506420 0012547 5 ustar 00root root 0000000 0000000 igor2-0.5.3/.github/ 0000775 0000000 0000000 00000000000 14456506420 0014107 5 ustar 00root root 0000000 0000000 igor2-0.5.3/.github/workflows/ 0000775 0000000 0000000 00000000000 14456506420 0016144 5 ustar 00root root 0000000 0000000 igor2-0.5.3/.github/workflows/check.yml 0000664 0000000 0000000 00000001647 14456506420 0017754 0 ustar 00root root 0000000 0000000 name: Checks
on:
push:
pull_request:
jobs:
build:
runs-on: ${{ matrix.os }}
strategy:
matrix:
python-version: ['3.9', '3.10', '3.11']
os: [macos-latest, ubuntu-latest, windows-latest]
steps:
- uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
# prerequisites
python -m pip install --upgrade pip wheel
python -m pip install coverage flake8 pytest
# install dependencies
pip install -e .
# show installed packages
pip freeze
- name: Test with pytest
run: |
coverage run --source=igor2 -m pytest tests
- name: Lint with flake8
run: |
flake8 .
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
igor2-0.5.3/.github/workflows/deploy_pypi.yml 0000664 0000000 0000000 00000001310 14456506420 0021217 0 ustar 00root root 0000000 0000000 name: Release to PyPI
on:
push:
tags:
- '*'
jobs:
deploy:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['3.10']
steps:
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install setuptools wheel twine
pip install -e .
- name: Build and publish
env:
TWINE_USERNAME: __token__
TWINE_PASSWORD: ${{ secrets.PYPI_PWD }}
run: |
pip wheel --no-deps -w dist .
twine upload --skip-existing dist/*
igor2-0.5.3/.gitignore 0000664 0000000 0000000 00000000134 14456506420 0014535 0 ustar 00root root 0000000 0000000 AUTHORS
MANIFEST
build/
dist/
igor.egg-info/
*.pyc
.idea
igor2/_version.py
*.egg-info
.env
igor2-0.5.3/CHANGELOG 0000664 0000000 0000000 00000001137 14456506420 0013763 0 ustar 00root root 0000000 0000000 0.5.3
- setup: bump numpy to 1.25.1
- tests: workaround for broken numpy repr
0.5.2
- docs: cleanup readme
0.5.1
- ci: fix automated pipeline
0.5.0
- BREAKING CHANGE: remove the igorpy compatibility layer
- docs: include Readme.rst for description on PyPI
- tests: migrate to pytest best as possible
- ref: remove hard-coded logging statement from __init__
- ref: flake8
- ref: remove clutter LGPL notices, nobody does that anymore and
the LGPL is advertised sufficiently elsewhere
0.4.0
- fix: merge all pull requests to original igor project (#1, #2, #3)
- setup: migrate to pyproject.toml
igor2-0.5.3/LICENSE 0000664 0000000 0000000 00000016727 14456506420 0013571 0 ustar 00root root 0000000 0000000 GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
igor2-0.5.3/MANIFEST.in 0000664 0000000 0000000 00000000047 14456506420 0014306 0 ustar 00root root 0000000 0000000 include COPYING
include COPYING.LESSER
igor2-0.5.3/Readme.rst 0000664 0000000 0000000 00000005366 14456506420 0014510 0 ustar 00root root 0000000 0000000 Igor2
=====
|PyPI Version| |Build Status| |Coverage Status|
Python parser for Igor Binary Waves (.ibw) and Packed Experiment
(.pxp) files written by WaveMetrics' IGOR Pro software.
Igor2 is the continuation of the inactive igor project, forked
from W. Trevor King and originally written by Paul Kienzle (see
git history).
Installation
------------
You can install igor2 via pip::
pip install igor2
The commands ``igorbinarywave`` and ``igorpackedexperiment`` are currently
not properly implemented (see https://github.com/AFM-analysis/igor2/issues/6),
but it should be straight-forward to fix this.
To install igor2 with the command-line interface (CLI), run::
pip install igor2[CLI]
Usage
-----
This package is a direct replacement of `igor`. Your scripts should work
without any issues if you replace::
import igor
with::
import igor2 as igor
See the docstrings and unit tests for examples using the Python API.
CLI
---
The package also installs to scripts, ``igorbinarywave`` and
``igorpackedexperiment`` which can be used to dump files to stdout.
For details on their usage, use the ``--help`` option. For example::
igorbinarywave --help
Testing
-------
Run internal unit tests with pytest::
pip install -r tests/requirements.txt
pytest tests
The data in the ``test/data`` directory is in the Git repository, but
it is not bundled with the source code. If you want the test data,
you'll have to clone the Git repository or download a snapshot.
Licence
-------
This project is distributed under the `GNU Lesser General Public
License Version 3`_ or greater, see the ``LICENSE`` file distributed
with the project for details.
.. _layman: http://layman.sourceforge.net/
.. _wtk overlay: http://blog.tremily.us/posts/Gentoo_overlay/
.. _Debian: http://www.debian.org/
.. _Gentoo: http://www.gentoo.org/
.. _NumPy: http://numpy.scipy.org/
.. _Matplotlib: http://matplotlib.sourceforge.net/
.. _Nose: http://somethingaboutorange.com/mrl/projects/nose/
.. _Git: http://git-scm.com/
.. _homepage: http://blog.tremily.us/posts/igor/
.. _pip: http://pypi.python.org/pypi/pip
.. _igor.py: http://pypi.python.org/pypi/igor.py
.. _GNU Lesser General Public License Version 3:
http://www.gnu.org/licenses/lgpl.txt
.. _update-copyright: http://blog.tremily.us/posts/update-copyright/
.. |PyPI Version| image:: https://img.shields.io/pypi/v/igor2.svg
:target: https://pypi.python.org/pypi/igor2
.. |Build Status| image:: https://img.shields.io/github/actions/workflow/status/AFM-analysis/igor2/check.yml?branch=master
:target: https://github.com/AFM-analysis/igor2/actions?query=workflow%3AChecks
.. |Coverage Status| image:: https://img.shields.io/codecov/c/github/AFM-analysis/igor2/master.svg
:target: https://codecov.io/gh/AFM-analysis/igor2
igor2-0.5.3/igor2/ 0000775 0000000 0000000 00000000000 14456506420 0013571 5 ustar 00root root 0000000 0000000 igor2-0.5.3/igor2/__init__.py 0000664 0000000 0000000 00000000223 14456506420 0015677 0 ustar 00root root 0000000 0000000 # flake8: noqa: F401
"""Interface for reading binary IGOR files."""
from ._version import __version__, __version_tuple__
from . import binarywave
igor2-0.5.3/igor2/binarywave.py 0000664 0000000 0000000 00000075676 14456506420 0016337 0 ustar 00root root 0000000 0000000 """Read IGOR Binary Wave files into Numpy arrays."""
import logging
# Based on WaveMetric's Technical Note 003, "Igor Binary Format"
# ftp://ftp.wavemetrics.net/IgorPro/Technical_Notes/TN003.zip
# From ftp://ftp.wavemetrics.net/IgorPro/Technical_Notes/TN000.txt
# We place no restrictions on copying Technical Notes, with the
# exception that you cannot resell them. So read, enjoy, and
# share. We hope IGOR Technical Notes will provide you with lots of
# valuable information while you are developing IGOR applications.
import numpy as _numpy
from .struct import Structure as _Structure
from .struct import DynamicStructure as _DynamicStructure
from .struct import Field as _Field
from .struct import DynamicField as _DynamicField
from .util import byte_order as _byte_order
from .util import need_to_reorder_bytes as _need_to_reorder_bytes
logger = logging.getLogger(__name__)
# Numpy doesn't support complex integers by default, see
# http://mail.python.org/pipermail/python-dev/2002-April/022408.html
# http://mail.scipy.org/pipermail/numpy-discussion/2007-October/029447.html
# So we roll our own types. See
# http://docs.scipy.org/doc/numpy/user/basics.rec.html
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.dtype.html
complexInt8 = _numpy.dtype([('real', _numpy.int8), ('imag', _numpy.int8)])
complexInt16 = _numpy.dtype([('real', _numpy.int16), ('imag', _numpy.int16)])
complexInt32 = _numpy.dtype([('real', _numpy.int32), ('imag', _numpy.int32)])
complexUInt8 = _numpy.dtype([('real', _numpy.uint8), ('imag', _numpy.uint8)])
complexUInt16 = _numpy.dtype(
[('real', _numpy.uint16), ('imag', _numpy.uint16)])
complexUInt32 = _numpy.dtype(
[('real', _numpy.uint32), ('imag', _numpy.uint32)])
class StaticStringField (_DynamicField):
_null_terminated = False
_array_size_field = None
def __init__(self, *args, **kwargs):
if 'array' not in kwargs:
kwargs['array'] = True
super(StaticStringField, self).__init__(*args, **kwargs)
def post_unpack(self, parents, data):
wave_structure = parents[-1]
wave_data = self._get_structure_data(parents, data, wave_structure)
d = self._normalize_string(wave_data[self.name])
wave_data[self.name] = d
def _normalize_string(self, d):
if isinstance(d, bytes):
pass
elif hasattr(d, 'tobytes'):
d = d.tobytes()
elif hasattr(d, 'tostring'): # Python 2 compatibility
d = d.tostring()
else:
d = b''.join(d)
if self._array_size_field:
start = 0
strings = []
for count in self.counts:
end = start + count
if end > start:
strings.append(d[start:end])
if self._null_terminated:
strings[-1] = strings[-1].split(b'\x00', 1)[0]
start = end
elif self._null_terminated:
d = d.split(b'\x00', 1)[0]
return d
class NullStaticStringField (StaticStringField):
_null_terminated = True
# Begin IGOR constants and typedefs from IgorBin.h
# From IgorMath.h
TYPE_TABLE = { # (key: integer flag, value: numpy dtype)
0: None, # Text wave, not handled in ReadWave.c
1: _numpy.complex_, # NT_CMPLX, makes number complex.
2: _numpy.float32, # NT_FP32, 32 bit fp numbers.
3: _numpy.complex64,
4: _numpy.float64, # NT_FP64, 64 bit fp numbers.
5: _numpy.complex128,
8: _numpy.int8, # NT_I8, 8 bit signed integer. Requires Igor Pro
# 2.0 or later.
9: complexInt8,
0x10: _numpy.int16, # NT_I16, 16 bit integer numbers. Requires Igor
# Pro 2.0 or later.
0x11: complexInt16,
0x20: _numpy.int32, # NT_I32, 32 bit integer numbers. Requires Igor
# Pro 2.0 or later.
0x21: complexInt32,
# 0x40:None, # NT_UNSIGNED, Makes above signed integers
# # unsigned. Requires Igor Pro 3.0 or later.
0x48: _numpy.uint8,
0x49: complexUInt8,
0x50: _numpy.uint16,
0x51: complexUInt16,
0x60: _numpy.uint32,
0x61: complexUInt32,
}
# From wave.h
MAXDIMS = 4
# From binary.h
BinHeader1 = _Structure( # `version` field pulled out into Wave
name='BinHeader1',
fields=[
_Field(
'l',
'wfmSize',
help='The size of the WaveHeader2 data structure plus the wave '
'data plus 16 bytes of padding.'),
_Field('h', 'checksum',
help='Checksum over this header and the wave header.'),
])
BinHeader2 = _Structure( # `version` field pulled out into Wave
name='BinHeader2',
fields=[
_Field(
'l',
'wfmSize',
help='The size of the WaveHeader2 data structure plus the wave '
'data plus 16 bytes of padding.'),
_Field('l', 'noteSize', help='The size of the note text.'),
_Field('l', 'pictSize', default=0,
help='Reserved. Write zero. Ignore on read.'),
_Field('h', 'checksum',
help='Checksum over this header and the wave header.'),
])
BinHeader3 = _Structure( # `version` field pulled out into Wave
name='BinHeader3',
fields=[
_Field(
'l',
'wfmSize',
help='The size of the WaveHeader2 data structure plus the wave '
'data plus 16 bytes of padding.'),
_Field('l', 'noteSize', help='The size of the note text.'),
_Field('l', 'formulaSize',
help='The size of the dependency formula, if any.'),
_Field('l', 'pictSize', default=0,
help='Reserved. Write zero. Ignore on read.'),
_Field('h', 'checksum',
help='Checksum over this header and the wave header.'),
])
BinHeader5 = _Structure( # `version` field pulled out into Wave
name='BinHeader5',
fields=[
_Field('h', 'checksum',
help='Checksum over this header and the wave header.'),
_Field('l', 'wfmSize',
help='The size of the WaveHeader5 data structure plus the '
'wave data.'),
_Field('l', 'formulaSize',
help='The size of the dependency formula, if any.'),
_Field('l', 'noteSize', help='The size of the note text.'),
_Field('l', 'dataEUnitsSize',
help='The size of optional extended data units.'),
_Field('l', 'dimEUnitsSize', help='The size of optional extended '
'dimension units.',
count=MAXDIMS, array=True),
_Field('l', 'dimLabelsSize', help='The size of optional dimension '
'labels.',
count=MAXDIMS, array=True),
_Field('l', 'sIndicesSize',
help='The size of string indicies if this is a text wave.'),
_Field('l', 'optionsSize1', default=0,
help='Reserved. Write zero. Ignore on read.'),
_Field('l', 'optionsSize2', default=0,
help='Reserved. Write zero. Ignore on read.'),
])
# From wave.h
MAX_WAVE_NAME2 = 18 # Maximum length of wave name in version 1 and 2
# files. Does not include the trailing null.
MAX_WAVE_NAME5 = 31 # Maximum length of wave name in version 5
# files. Does not include the trailing null.
MAX_UNIT_CHARS = 3
# Header to an array of waveform data.
# `wData` field pulled out into DynamicWaveDataField1
WaveHeader2 = _DynamicStructure(
name='WaveHeader2',
fields=[
_Field(
'h',
'type',
help='See types (e.g. NT_FP64) above. Zero for text waves.'),
_Field('P', 'next', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
NullStaticStringField(
'c', 'bname', help='Name of wave plus trailing null.',
count=MAX_WAVE_NAME2 + 2),
_Field('h', 'whVersion', default=0, help='Write 0. Ignore on read.'),
_Field('h', 'srcFldr', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
_Field('P', 'fileName', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
_Field('c', 'dataUnits', default=0, help='Natural data units go here '
'- null if none.',
count=MAX_UNIT_CHARS + 1, array=True),
_Field('c', 'xUnits', default=0, help='Natural x-axis units go here - '
'null if none.',
count=MAX_UNIT_CHARS + 1, array=True),
_Field('l', 'npnts', help='Number of data points in wave.'),
_Field('h', 'aModified', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
_Field('d', 'hsA', help='X value for point p = hsA*p + hsB'),
_Field('d', 'hsB', help='X value for point p = hsA*p + hsB'),
_Field('h', 'wModified', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
_Field('h', 'swModified', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
_Field('h', 'fsValid', help='True if full scale values have meaning.'),
# sic, 'min' should probably be 'max'
_Field('d', 'topFullScale', help='The min full scale value for wave.'),
_Field('d', 'botFullScale', help='The min full scale value for wave.'),
_Field('c', 'useBits', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
_Field('c', 'kindBits', default=0,
help='Reserved. Write zero. Ignore on read.'),
_Field('P', 'formula', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
_Field('l', 'depID', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
_Field('L', 'creationDate',
help='DateTime of creation. Not used in version 1 files.'),
_Field('c', 'wUnused', default=0,
help='Reserved. Write zero. Ignore on read.', count=2,
array=True),
_Field('L', 'modDate', help='DateTime of last modification.'),
_Field('P', 'waveNoteH',
help='Used in memory only. Write zero. Ignore on read.'),
])
# `sIndices` pointer unset (use Wave5_data['sIndices'] instead). This
# field is filled in by DynamicStringIndicesDataField.
# `wData` field pulled out into DynamicWaveDataField5
WaveHeader5 = _DynamicStructure(
name='WaveHeader5',
fields=[
_Field('P', 'next', help='link to next wave in linked list.'),
_Field('L', 'creationDate', help='DateTime of creation.'),
_Field('L', 'modDate', help='DateTime of last modification.'),
_Field(
'l', 'npnts', help='Total number of points (multiply dimensions '
'up to first zero).'),
_Field(
'h',
'type',
help='See types (e.g. NT_FP64) above. Zero for text waves.'),
_Field('h', 'dLock', default=0,
help='Reserved. Write zero. Ignore on read.'),
_Field('c', 'whpad1', default=0,
help='Reserved. Write zero. Ignore on read.',
count=6, array=True),
_Field('h', 'whVersion', default=1, help='Write 1. Ignore on read.'),
NullStaticStringField(
'c', 'bname', help='Name of wave plus trailing null.',
count=MAX_WAVE_NAME5 + 1),
_Field('l', 'whpad2', default=0,
help='Reserved. Write zero. Ignore on read.'),
_Field('P', 'dFolder', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
# Dimensioning info. [0] == rows, [1] == cols etc
_Field('l', 'nDim', help='Number of of items in a dimension '
'-- 0 means no data.',
count=MAXDIMS, array=True),
_Field(
'd', 'sfA', help='Index value for element e of dimension '
'd = sfA[d]*e + sfB[d].',
count=MAXDIMS, array=True),
_Field(
'd', 'sfB', help='Index value for element e of dimension '
'd = sfA[d]*e + sfB[d].',
count=MAXDIMS, array=True),
# SI units
_Field('c', 'dataUnits', default=0, help='Natural data units go '
'here - null if none.',
count=MAX_UNIT_CHARS + 1, array=True),
_Field('c', 'dimUnits', default=0, help='Natural dimension units '
'go here - null if none.',
count=(MAXDIMS, MAX_UNIT_CHARS + 1), array=True),
_Field('h', 'fsValid', help='TRUE if full scale values have meaning.'),
_Field('h', 'whpad3', default=0,
help='Reserved. Write zero. Ignore on read.'),
# sic, probably "max and min"
_Field('d', 'topFullScale',
help='The max and max full scale value for wave'),
# sic, probably "max and min"
_Field('d', 'botFullScale',
help='The max and max full scale value for wave.'),
_Field('P', 'dataEUnits', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
_Field('P', 'dimEUnits', default=0,
help='Used in memory only. Write zero. Ignore on read.',
count=MAXDIMS, array=True),
_Field('P', 'dimLabels', default=0,
help='Used in memory only. Write zero. Ignore on read.',
count=MAXDIMS, array=True),
_Field('P', 'waveNoteH', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
_Field('l', 'whUnused', default=0,
help='Reserved. Write zero. Ignore on read.',
count=16, array=True),
# The following stuff is considered private to Igor.
_Field('h', 'aModified', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
_Field('h', 'wModified', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
_Field('h', 'swModified', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
_Field('c', 'useBits', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
_Field('c', 'kindBits', default=0,
help='Reserved. Write zero. Ignore on read.'),
_Field('P', 'formula', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
_Field('l', 'depID', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
_Field('h', 'whpad4', default=0,
help='Reserved. Write zero. Ignore on read.'),
_Field('h', 'srcFldr', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
_Field('P', 'fileName', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
_Field('P', 'sIndices', default=0,
help='Used in memory only. Write zero. Ignore on read.'),
])
class DynamicWaveDataField1 (_DynamicField):
def pre_pack(self, parents, data):
raise NotImplementedError()
def pre_unpack(self, parents, data):
# full_structure = parents[0]
wave_structure = parents[-1]
wave_header_structure = wave_structure.fields[1].format
wave_data = self._get_structure_data(parents, data, wave_structure)
version = data['version']
bin_header = wave_data['bin_header']
wave_header = wave_data['wave_header']
self.count = wave_header['npnts']
self.data_size = self._get_size(bin_header, wave_header_structure.size)
type_ = TYPE_TABLE.get(wave_header['type'], None)
if type_:
self.shape = self._get_shape(bin_header, wave_header)
else: # text wave
type_ = _numpy.dtype('S1')
self.shape = (self.data_size,)
# dtype() wrapping to avoid numpy.generic and
# getset_descriptor issues with the builtin numpy types
# (e.g. int32). It has no effect on our local complex
# integers.
self.dtype = _numpy.dtype(type_).newbyteorder(
wave_structure.byte_order)
if (version == 3 and
self.count > 0 and
bin_header['formulaSize'] > 0 and
self.data_size == 0):
"""From TN003:
Igor Pro 2.00 included support for dependency formulae. If
a wave was governed by a dependency formula then the
actual wave data was not written to disk for that wave,
because on loading the wave Igor could recalculate the
data. However,this prevented the wave from being loaded
into an experiment other than the original
experiment. Consequently, in a version of Igor Pro 3.0x,
we changed it so that the wave data was written even if
the wave was governed by a dependency formula. When
reading a binary wave file, you can detect that the wave
file does not contain the wave data by examining the
wfmSize, formulaSize and npnts fields. If npnts is greater
than zero and formulaSize is greater than zero and
the waveDataSize as calculated above is zero, then this is
a file governed by a dependency formula that was written
without the actual wave data.
"""
self.shape = (0,)
elif TYPE_TABLE.get(wave_header['type'], None) is not None:
assert self.data_size == self.count * self.dtype.itemsize, (
self.data_size, self.count, self.dtype.itemsize, self.dtype)
else:
assert self.data_size >= 0, (
bin_header['wfmSize'], wave_header_structure.size)
def _get_size(self, bin_header, wave_header_size):
return bin_header['wfmSize'] - wave_header_size - 16
def _get_shape(self, bin_header, wave_header):
return (self.count,)
def unpack(self, stream):
data_b = stream.read(self.data_size)
try:
data = _numpy.ndarray(
shape=self.shape,
dtype=self.dtype,
buffer=data_b,
order='F',
)
except BaseException:
logger.error(
'could not reshape data from {} to {}'.format(
self.shape, data_b))
raise
return data
class DynamicWaveDataField5 (DynamicWaveDataField1):
"Adds support for multidimensional data."
def _get_size(self, bin_header, wave_header_size):
return bin_header['wfmSize'] - wave_header_size
def _get_shape(self, bin_header, wave_header):
return [n for n in wave_header['nDim'] if n > 0] or (0,)
# End IGOR constants and typedefs from IgorBin.h
class DynamicStringField (StaticStringField):
_size_field = None
def pre_unpack(self, parents, data):
size = self._get_size_data(parents, data)
if self._array_size_field:
self.counts = size
self.count = sum(self.counts)
else:
self.count = size
self.setup()
def _get_size_data(self, parents, data):
wave_structure = parents[-1]
wave_data = self._get_structure_data(parents, data, wave_structure)
bin_header = wave_data['bin_header']
return bin_header[self._size_field]
class DynamicWaveNoteField (DynamicStringField):
_size_field = 'noteSize'
class DynamicDependencyFormulaField (DynamicStringField):
"""Optional wave dependency formula
Excerpted from TN003:
A wave has a dependency formula if it has been bound by a
statement such as "wave0 := sin(x)". In this example, the
dependency formula is "sin(x)". The formula is stored with
no trailing null byte.
"""
_size_field = 'formulaSize'
# Except when it is stored with a trailing null byte :p. See, for
# example, test/data/mac-version3Dependent.ibw.
_null_terminated = True
class DynamicDataUnitsField (DynamicStringField):
"""Optional extended data units data
Excerpted from TN003:
dataUnits - Present in versions 1, 2, 3, 5. The dataUnits field
stores the units for the data represented by the wave. It is a C
string terminated with a null character. This field supports
units of 0 to 3 bytes. In version 1, 2 and 3 files, longer units
can not be represented. In version 5 files, longer units can be
stored using the optional extended data units section of the
file.
"""
_size_field = 'dataEUnitsSize'
class DynamicDimensionUnitsField (DynamicStringField):
"""Optional extended dimension units data
Excerpted from TN003:
xUnits - Present in versions 1, 2, 3. The xUnits field stores the
X units for a wave. It is a C string terminated with a null
character. This field supports units of 0 to 3 bytes. In
version 1, 2 and 3 files, longer units can not be represented.
dimUnits - Present in version 5 only. This field is an array of 4
strings, one for each possible wave dimension. Each string
supports units of 0 to 3 bytes. Longer units can be stored using
the optional extended dimension units section of the file.
"""
_size_field = 'dimEUnitsSize'
_array_size_field = True
class DynamicLabelsField (DynamicStringField):
    """Optional dimension label data
    From TN003:
    If the wave has dimension labels for dimension d then the
    dimLabelsSize[d] field of the BinHeader5 structure will be
    non-zero.
    A wave will have dimension labels if a SetDimLabel command has
    been executed on it.
    A 3 point 1D wave has 4 dimension labels. The first dimension
    label is the label for the dimension as a whole. The next three
    dimension labels are the labels for rows 0, 1, and 2. When Igor
    writes dimension labels to disk, it writes each dimension label as
    a C string (null-terminated) in a field of 32 bytes.
    """
    _size_field = 'dimLabelsSize'
    # the size field is an array (one entry per dimension)
    _array_size_field = True

    def post_unpack(self, parents, data):
        """Split the raw label bytes into one list of labels per dimension."""
        wave_structure = parents[-1]
        wave_data = self._get_structure_data(parents, data, wave_structure)
        bin_header = wave_data['bin_header']
        d = wave_data[self.name]
        dim_labels = []
        start = 0
        # bin_header[self._size_field][dim] is the byte count of the
        # label data for dimension `dim` (0 if no labels).
        for size in bin_header[self._size_field]:
            end = start + size
            if end > start:
                dim_data = d[start:end]
                # each label occupies a fixed 32-byte slot on disk
                chunks = []
                for i in range(size // 32):
                    chunks.append(dim_data[32 * i:32 * (i + 1)])
                labels = [b'']
                for chunk in chunks:
                    labels[-1] = labels[-1] + b''.join(chunk)
                    if b'\x00' in chunk:
                        # a null byte terminates the current label;
                        # start accumulating the next one
                        labels.append(b'')
                # drop the trailing (empty or unterminated) accumulator
                labels.pop(-1)
                start = end
            else:
                labels = []
            dim_labels.append(labels)
        wave_data[self.name] = dim_labels
class DynamicStringIndicesDataField (_DynamicField):
    """String indices used for text waves only

    For text waves ``wData`` holds the concatenated string contents,
    and ``sIndices`` holds the byte offset of the *end* of each string.
    ``post_unpack`` slices ``wData`` into the individual strings and
    reshapes them into the wave's dimensions.
    """
    def pre_pack(self, parents, data):
        raise NotImplementedError()

    def pre_unpack(self, parents, data):
        """Size this field from the header before unpacking."""
        wave_structure = parents[-1]
        wave_data = self._get_structure_data(parents, data, wave_structure)
        bin_header = wave_data['bin_header']
        wave_header = wave_data['wave_header']
        self.string_indices_size = bin_header['sIndicesSize']
        # each string index is a 4-byte integer
        self.count = self.string_indices_size // 4
        if self.count:  # make sure we're in a text wave
            assert TYPE_TABLE[wave_header['type']] is None, wave_header
        self.setup()

    def post_unpack(self, parents, data):
        """Slice wData into strings and reshape to the wave dimensions."""
        if not self.count:
            return
        wave_structure = parents[-1]
        wave_data = self._get_structure_data(parents, data, wave_structure)
        wave_header = wave_data['wave_header']
        wdata = wave_data['wData']
        strings = []
        start = 0
        # sIndices[i] is the end offset of string i; offsets must be
        # monotonically non-decreasing.  (Fixed: dropped an unused
        # enumerate() index from the original loop.)
        for offset in wave_data['sIndices']:
            if offset > start:
                chars = wdata[start:offset]
                strings.append(b''.join(chars))
                start = offset
            elif offset == start:
                strings.append(b'')
            else:
                # decreasing offset: corrupt index table
                raise ValueError((offset, wave_data['sIndices']))
        wdata = _numpy.array(strings)
        shape = [n for n in wave_header['nDim'] if n > 0] or (0,)
        try:
            wdata = wdata.reshape(shape)
        except ValueError:
            logger.error(
                'could not reshape strings from {} to {}'.format(
                    shape, wdata.shape))
            raise
        wave_data['wData'] = wdata
class DynamicVersionField (_DynamicField):
    # Reads the leading `version` field of a .ibw file and reconfigures
    # the parent structure (byte order and wave layout) to match it.
    def pre_pack(self, parents, byte_order):
        raise NotImplementedError()

    def post_unpack(self, parents, data):
        wave_structure = parents[-1]
        wave_data = self._get_structure_data(parents, data, wave_structure)
        version = wave_data['version']
        if wave_structure.byte_order in '@=':
            # native byte order requested: deduce the real order from
            # whether the version number looks byte-swapped
            need_to_reorder_bytes = _need_to_reorder_bytes(version)
            wave_structure.byte_order = _byte_order(need_to_reorder_bytes)
            logger.debug(
                'get byte order from version: {} (reorder? {})'.format(
                    wave_structure.byte_order, need_to_reorder_bytes))
        else:
            need_to_reorder_bytes = False
        old_format = wave_structure.fields[-1].format
        # select the wave layout matching the file version
        if version == 1:
            wave_structure.fields[-1].format = Wave1
        elif version == 2:
            wave_structure.fields[-1].format = Wave2
        elif version == 3:
            wave_structure.fields[-1].format = Wave3
        elif version == 5:
            wave_structure.fields[-1].format = Wave5
        elif not need_to_reorder_bytes:
            raise ValueError(
                'invalid binary wave version: {}'.format(version))
        if wave_structure.fields[-1].format != old_format:
            logger.debug('change wave headers from {} to {}'.format(
                old_format, wave_structure.fields[-1].format))
            wave_structure.setup()
        elif need_to_reorder_bytes:
            wave_structure.setup()
        # we might need to unpack again with the new byte order
        return need_to_reorder_bytes
class DynamicWaveField (_DynamicField):
    # Currently a no-op hook; the checksum validation drafted below
    # remains disabled.
    def post_unpack(self, parents, data):
        return
    # The following draft code was written by Trevor King and was
    # commented out during the igor -> igor2 package migration.
    #
    # checksum_size = bin.size + wave.size
    # wave_structure = parents[-1]
    # if version == 5:
    #     # Version 5 checksum does not include the wData field.
    #     checksum_size -= 4
    # c = _checksum(b, parents[-1].byte_order, 0, checksum_size)
    # if c != 0:
    #     raise ValueError(
    #         ('This does not appear to be a valid Igor binary wave file. '
    #          'Error in checksum: should be 0, is {}.').format(c))
# Wave layouts for each .ibw file version.  DynamicVersionField swaps
# the appropriate one into the top-level `Wave` structure at unpack time.
Wave1 = _DynamicStructure(
    name='Wave1',
    fields=[
        _Field(
            BinHeader1,
            'bin_header',
            help='Binary wave header'),
        _Field(
            WaveHeader2,
            'wave_header',
            help='Wave header'),
        DynamicWaveDataField1(
            'f',
            'wData',
            help='The start of the array of waveform data.',
            count=0,
            array=True),
    ])


Wave2 = _DynamicStructure(
    name='Wave2',
    fields=[
        _Field(
            BinHeader2,
            'bin_header',
            help='Binary wave header'),
        _Field(
            WaveHeader2,
            'wave_header',
            help='Wave header'),
        DynamicWaveDataField1(
            'f',
            'wData',
            help='The start of the array of waveform data.',
            count=0,
            array=True),
        _Field(
            'x',
            'padding',
            help='16 bytes of padding in versions 2 and 3.',
            count=16,
            array=True),
        DynamicWaveNoteField(
            'c',
            'note',
            help='Optional wave note data',
            count=0,
            array=True),
    ])


Wave3 = _DynamicStructure(
    name='Wave3',
    fields=[
        _Field(
            BinHeader3,
            'bin_header',
            help='Binary wave header'),
        _Field(
            WaveHeader2,
            'wave_header',
            help='Wave header'),
        DynamicWaveDataField1(
            'f',
            'wData',
            help='The start of the array of waveform data.',
            count=0,
            array=True),
        _Field(
            'x',
            'padding',
            help='16 bytes of padding in versions 2 and 3.',
            count=16,
            array=True),
        DynamicWaveNoteField(
            'c',
            'note',
            help='Optional wave note data',
            count=0,
            array=True),
        DynamicDependencyFormulaField(
            'c',
            'formula',
            help='Optional wave dependency formula',
            count=0,
            array=True),
    ])


Wave5 = _DynamicStructure(
    name='Wave5',
    fields=[
        _Field(
            BinHeader5, 'bin_header',
            help='Binary wave header'),
        _Field(
            WaveHeader5, 'wave_header',
            help='Wave header'),
        DynamicWaveDataField5(
            'f', 'wData',
            help='The start of the array of waveform data.',
            count=0, array=True),
        DynamicDependencyFormulaField(
            'c', 'formula',
            help='Optional wave dependency formula.',
            count=0, array=True),
        DynamicWaveNoteField(
            'c', 'note',
            help='Optional wave note data.', count=0,
            array=True),
        DynamicDataUnitsField(
            'c', 'data_units',
            help='Optional extended data units data.',
            count=0, array=True),
        DynamicDimensionUnitsField(
            'c', 'dimension_units',
            help='Optional dimension label data',
            count=0, array=True),
        DynamicLabelsField(
            'c', 'labels',
            help="Optional dimension label data",
            count=0, array=True),
        DynamicStringIndicesDataField(
            'P', 'sIndices',
            help='Dynamic string indices for text waves.',
            count=0, array=True),
    ])


# Top-level structure: `version` is read first, then DynamicVersionField
# rewires the `wave` field's format (Wave1/2/3/5) to match it.
Wave = _DynamicStructure(
    name='Wave',
    fields=[
        DynamicVersionField(
            'h',
            'version',
            help='Version number for backwards compatibility.'),
        DynamicWaveField(
            Wave1,
            'wave',
            help='The rest of the wave data.'),
    ])
def load(filename):
    """Load an Igor Binary Wave (.ibw) file.

    ``filename`` may be a filesystem path or an already-open binary
    stream (anything with a ``read`` method).  A stream supplied by
    the caller is left open; a path opened here is closed on exit.
    """
    opened_here = not hasattr(filename, 'read')
    stream = open(filename, 'rb') if opened_here else filename
    try:
        Wave.byte_order = '='
        Wave.setup()
        return Wave.unpack_stream(stream)
    finally:
        if opened_here:
            stream.close()
def save(filename):
    # Writing .ibw files is not supported.
    raise NotImplementedError
igor2-0.5.3/igor2/cli/ 0000775 0000000 0000000 00000000000 14456506420 0014340 5 ustar 00root root 0000000 0000000 igor2-0.5.3/igor2/cli/igorbinarywave.py 0000775 0000000 0000000 00000001000 14456506420 0017734 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
"""IBW -> ASCII conversion"""
import pprint
import numpy
from igor.binarywave import load
from igor.script import Script
class WaveScript (Script):
def _run(self, args):
wave = load(args.infile)
numpy.savetxt(
args.outfile, wave['wave']['wData'], fmt='%g', delimiter='\t')
self.plot_wave(args, wave)
if args.verbose > 0:
wave['wave'].pop('wData')
pprint.pprint(wave)
s = WaveScript(description=__doc__)
s.run()
igor2-0.5.3/igor2/cli/igorpackedexperiment.py 0000775 0000000 0000000 00000003313 14456506420 0021126 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
#
# Copyright (C) 2012 W. Trevor King
#
# This file is part of igor.
#
# igor is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# igor is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with igor. If not, see <http://www.gnu.org/licenses/>.
"PXP -> ASCII conversion"
import pprint
from igor.packed import load, walk
from igor.record.wave import WaveRecord
from igor.script import Script
class PackedScript (Script):
def _run(self, args):
self.args = args
records, filesystem = load(args.infile)
if hasattr(args.outfile, 'write'):
f = args.outfile # filename is actually a stream object
else:
f = open(args.outfile, 'w')
try:
f.write(pprint.pformat(records))
f.write('\n')
finally:
if f != args.outfile:
f.close()
if args.verbose > 0:
pprint.pprint(filesystem)
walk(filesystem, self._plot_wave_callback)
def _plot_wave_callback(self, dirpath, key, value):
if isinstance(value, WaveRecord):
self.plot_wave(self.args, value.wave, title=dirpath + [key])
s = PackedScript(
description=__doc__, filetype='IGOR Packed Experiment (.pxp) file')
s.run()
igor2-0.5.3/igor2/cli/script.py 0000664 0000000 0000000 00000004555 14456506420 0016227 0 ustar 00root root 0000000 0000000 """Common code for scripts distributed with the `igor` package."""
from __future__ import absolute_import
import argparse as _argparse
import logging
import sys as _sys
import matplotlib.pyplot as pyplot
from .._version import __version__
logger = logging.getLogger(__name__)
class Script (object):
    """Common command-line scaffolding for the igor2 scripts.

    Handles argument parsing (input/output files, optional plotting,
    verbosity) and deferred Matplotlib display of parsed waves.
    """
    # -V count maps onto these logging levels (most severe first)
    log_levels = [logging.ERROR, logging.WARNING,
                  logging.INFO, logging.DEBUG]

    def __init__(self, description=None,
                 filetype='IGOR Binary Wave (.ibw) file'):
        self.parser = _argparse.ArgumentParser(description=description)
        self.parser.add_argument(
            '--version', action='version',
            version='%(prog)s {}'.format(__version__))
        self.parser.add_argument(
            '-f', '--infile', metavar='FILE', default='-',
            help='input {}'.format(filetype))
        self.parser.add_argument(
            '-o', '--outfile', metavar='FILE', default='-',
            help='file for ASCII output')
        self.parser.add_argument(
            '-p', '--plot', action='store_const', const=True,
            help='use Matplotlib to plot any IGOR waves')
        self.parser.add_argument(
            '-V', '--verbose', action='count', default=0,
            help='increment verbosity')
        self._num_plots = 0

    def run(self, *args, **kwargs):
        """Parse arguments, run ``_run``, then display queued plots."""
        args = self.parser.parse_args(*args, **kwargs)
        # '-' means stdin/stdout
        if args.infile == '-':
            args.infile = _sys.stdin
        if args.outfile == '-':
            args.outfile = _sys.stdout
        if args.verbose > 1:
            log_level = self.log_levels[min(
                args.verbose - 1, len(self.log_levels) - 1)]
            logger.setLevel(log_level)
        self._run(args)
        self.display_plots()

    def _run(self, args):
        # subclasses implement the actual conversion here
        raise NotImplementedError()

    def plot_wave(self, args, wave, title=None):
        """Queue a plot of ``wave`` (no-op unless ``--plot`` was given)."""
        if not args.plot:
            return  # no-op
        if title is None:
            title = wave['wave']['wave_header']['bname']
        figure = pyplot.figure()
        axes = figure.add_subplot(1, 1, 1)
        axes.set_title(title)
        try:
            axes.plot(wave['wave']['wData'], 'r.')
        except ValueError as error:
            # e.g. data that pyplot cannot handle; log and continue
            # (removed a redundant `pass` after this logging call)
            logger.error('error plotting {}: {}'.format(title, error))
        self._num_plots += 1

    def display_plots(self):
        # show all queued figures at once
        if self._num_plots:
            pyplot.show()
igor2-0.5.3/igor2/packed.py 0000664 0000000 0000000 00000017642 14456506420 0015404 0 ustar 00root root 0000000 0000000 """Read IGOR Packed Experiment files into records."""
import logging
from .struct import Structure as _Structure
from .struct import Field as _Field
from .util import byte_order as _byte_order
from .util import need_to_reorder_bytes as _need_to_reorder_bytes
from .util import _bytes
from .record import RECORD_TYPE as _RECORD_TYPE
from .record.base import UnknownRecord as _UnknownRecord
from .record.base import UnusedRecord as _UnusedRecord
from .record.folder import FolderStartRecord as _FolderStartRecord
from .record.folder import FolderEndRecord as _FolderEndRecord
from .record.variables import VariablesRecord as _VariablesRecord
from .record.wave import WaveRecord as _WaveRecord
logger = logging.getLogger(__name__)
# From PTN003:
# Igor writes other kinds of records in a packed experiment file, for
# storing things like pictures, page setup records, and miscellaneous
# settings. The format for these records is quite complex and is not
# described in PTN003. If you are writing a program to read packed
# files, you must skip any record with a record type that is not
# listed above.
# Header that precedes every record in a packed experiment file.
PackedFileRecordHeader = _Structure(
    name='PackedFileRecordHeader',
    fields=[
        _Field('H', 'recordType', help='Record type plus superceded flag.'),
        _Field('h', 'version',
               help='Version information depends on the type of record.'),
        _Field('l', 'numDataBytes',
               # fixed missing space in the implicit string concatenation
               # (previously rendered as "...following thisrecord header.")
               help='Number of data bytes in the record following this '
                    'record header.'),
    ])
# CR_STR = '\x15' (\r)
PACKEDRECTYPE_MASK = 0x7FFF  # Record type = (recordType & PACKEDREC_TYPE_MASK)
SUPERCEDED_MASK = 0x8000  # Bit is set if the record is superceded by
# a later record in the packed file.
def load(filename, strict=True, ignore_unknown=True):
    """Load a packed experiment (.pxp) file.

    Parameters
    ----------
    filename : str or file-like
        Path to the packed file, or a readable binary stream.  A
        stream supplied by the caller is left open.
    strict : bool
        Unused in this implementation; kept for backwards
        compatibility.
    ignore_unknown : bool
        If False, raise ``KeyError`` on unknown/unused record types.

    Returns
    -------
    (records, filesystem) : (list, dict)
        The parsed records in file order, and the folder tree built
        from them by ``_build_filesystem``.
    """
    logger.debug('loading a packed experiment file from {}'.format(filename))
    records = []
    if hasattr(filename, 'read'):
        f = filename  # filename is actually a stream object
    else:
        f = open(filename, 'rb')
    byte_order = None
    initial_byte_order = '='
    try:
        while True:
            PackedFileRecordHeader.byte_order = initial_byte_order
            PackedFileRecordHeader.setup()
            b = bytes(f.read(PackedFileRecordHeader.size))
            if not b:
                break
            if len(b) < PackedFileRecordHeader.size:
                raise ValueError(
                    ('not enough data for the next record header ({} < {})'
                     ).format(len(b), PackedFileRecordHeader.size))
            logger.debug('reading a new packed experiment file record')
            header = PackedFileRecordHeader.unpack_from(b)
            if header['version'] and not byte_order:
                # first versioned record fixes the file's byte order
                need_to_reorder = _need_to_reorder_bytes(header['version'])
                byte_order = initial_byte_order = _byte_order(need_to_reorder)
                logger.debug(
                    'get byte order from version: {} (reorder? {})'.format(
                        byte_order, need_to_reorder))
                if need_to_reorder:
                    PackedFileRecordHeader.byte_order = byte_order
                    PackedFileRecordHeader.setup()
                    header = PackedFileRecordHeader.unpack_from(b)
                    logger.debug(
                        'reordered version: {}'.format(header['version']))
            data = bytes(f.read(header['numDataBytes']))
            if len(data) < header['numDataBytes']:
                # bug fix: report the payload length (len(data)), not the
                # header length (len(b)), in the error message
                raise ValueError(
                    ('not enough data for the next record ({} < {})'
                     ).format(len(data), header['numDataBytes']))
            record_type = _RECORD_TYPE.get(
                header['recordType'] & PACKEDRECTYPE_MASK, _UnknownRecord)
            logger.debug('the new record has type {} ({}).'.format(
                record_type, header['recordType']))
            if record_type in [_UnknownRecord, _UnusedRecord
                               ] and not ignore_unknown:
                # typo fix: "unkown" -> "unknown"
                raise KeyError('unknown record type {}'.format(
                    header['recordType']))
            records.append(record_type(header, data, byte_order=byte_order))
    finally:
        logger.debug('finished loading {} records from {}'.format(
            len(records), filename))
        if not hasattr(filename, 'read'):
            f.close()
    filesystem = _build_filesystem(records)
    return records, filesystem
def _build_filesystem(records):
    """Build a nested dict (rooted at 'root') from a record list.

    Folder start/end records push/pop directories; variable and wave
    records become entries in the current directory.  The quoted
    strings below are excerpts kept verbatim from Igor documentation.
    """
    # From PTN003:
    """The name must be a valid Igor data folder name. See Object
    Names in the Igor Reference help file for name rules.
    When Igor Pro reads the data folder start record, it creates a new
    data folder with the specified name. Any subsequent variable, wave
    or data folder start records cause Igor to create data objects in
    this new data folder, until Igor Pro reads a corresponding data
    folder end record."""
    # From the Igor Manual, chapter 2, section 8, page II-123
    # http://www.wavemetrics.net/doc/igorman/II-08%20Data%20Folders.pdf
    """Like the Macintosh file system, Igor Pro's data folders use the
    colon character (:) to separate components of a path to an
    object. This is analogous to Unix which uses / and Windows which
    uses \\. (Reminder: Igor's data folders exist wholly in memory
    while an experiment is open. It is not a disk file system!)
    A data folder named "root" always exists and contains all other
    data folders.
    """
    # From the Igor Manual, chapter 4, page IV-2
    # http://www.wavemetrics.net/doc/igorman/IV-01%20Commands.pdf
    """For waves and data folders only, you can also use "liberal"
    names. Liberal names can include almost any character, including
    spaces and dots (see Liberal Object Names on page III-415 for
    details).
    """
    # From the Igor Manual, chapter 3, section 16, page III-416
    # http://www.wavemetrics.net/doc/igorman/III-16%20Miscellany.pdf
    """Liberal names have the same rules as standard names except you
    may use any character except control characters and the following:
    " ' : ;
    """
    filesystem = {'root': {}}
    # stack of (folder name, folder dict); top of stack is the cwd
    dir_stack = [('root', filesystem['root'])]
    for record in records:
        cwd = dir_stack[-1][-1]
        if isinstance(record, _FolderStartRecord):
            name = record.null_terminated_text
            cwd[name] = {}
            dir_stack.append((name, cwd[name]))
        elif isinstance(record, _FolderEndRecord):
            dir_stack.pop()
        elif isinstance(record, (_VariablesRecord, _WaveRecord)):
            if isinstance(record, _VariablesRecord):
                sys_vars = record.variables['variables']['sysVars'].keys()
                for filename, value in record.namespace.items():
                    if len(dir_stack) > 1 and filename in sys_vars:
                        # From PTN003:
                        """When reading a packed file, any system
                        variables encountered while the current data
                        folder is not the root should be ignored.
                        """
                        continue
                    _check_filename(dir_stack, filename)
                    cwd[filename] = value
            else:  # WaveRecord
                filename = record.wave['wave']['wave_header']['bname']
                _check_filename(dir_stack, filename)
                cwd[filename] = record
    return filesystem
def _check_filename(dir_stack, filename):
cwd = dir_stack[-1][-1]
if filename in cwd:
raise ValueError('collision on name {} in {}'.format(
filename, ':'.join(d for d, cwd in dir_stack)))
def walk(filesystem, callback, dirpath=None):
    """Walk a packed experiment filesystem, operating on each key,value pair.
    """
    if dirpath is None:
        dirpath = []
    # visit entries in deterministic (sorted, bytes-keyed) order
    entries = sorted((_bytes(name), node) for name, node in filesystem.items())
    for name, node in entries:
        callback(dirpath, name, node)
        if isinstance(node, dict):
            # recurse into sub-folders with an extended path
            walk(filesystem=node, callback=callback, dirpath=dirpath + [name])
igor2-0.5.3/igor2/record/ 0000775 0000000 0000000 00000000000 14456506420 0015047 5 ustar 00root root 0000000 0000000 igor2-0.5.3/igor2/record/__init__.py 0000664 0000000 0000000 00000001277 14456506420 0017167 0 ustar 00root root 0000000 0000000 # flake8: noqa: F401
"""Record parsers for IGOR's packed experiment files."""
from .base import Record, UnknownRecord, UnusedRecord
from .variables import VariablesRecord
from .history import HistoryRecord, RecreationRecord, GetHistoryRecord
from .wave import WaveRecord
from .procedure import ProcedureRecord
from .packedfile import PackedFileRecord
from .folder import FolderStartRecord, FolderEndRecord
# From PackedFile.h
# Maps the on-disk record-type code to the class that parses it.
RECORD_TYPE = {
    0: UnusedRecord,
    1: VariablesRecord,
    2: HistoryRecord,
    3: WaveRecord,
    4: RecreationRecord,
    5: ProcedureRecord,
    6: UnusedRecord,
    7: GetHistoryRecord,
    8: PackedFileRecord,
    9: FolderStartRecord,
    10: FolderEndRecord,
}
igor2-0.5.3/igor2/record/base.py 0000664 0000000 0000000 00000001455 14456506420 0016340 0 ustar 00root root 0000000 0000000 class Record (object):
    def __init__(self, header, data, byte_order=None):
        # parsed PackedFileRecordHeader dict for this record
        self.header = header
        # raw record payload bytes
        self.data = data
        self.byte_order = byte_order

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, id(self))
class UnknownRecord (Record):
    """Fallback for record-type codes not listed in RECORD_TYPE."""
    def __repr__(self):
        # include the numeric record type to aid debugging
        return '<{}-{} {}>'.format(
            self.__class__.__name__, self.header['recordType'], id(self))
class UnusedRecord (Record):
    # Placeholder for record types Igor marks as unused (codes 0 and 6).
    pass
class TextRecord (Record):
    """Record whose payload is text."""
    def __init__(self, *args, **kwargs):
        super(TextRecord, self).__init__(*args, **kwargs)
        # normalize CRLF / bare CR line endings to LF
        self.text = bytes(self.data).replace(
            b'\r\n', b'\n').replace(b'\r', b'\n')
        # text up to (but excluding) the first null byte
        self.null_terminated_text = self.text.split(b'\x00', 1)[0]
igor2-0.5.3/igor2/record/folder.py 0000664 0000000 0000000 00000000175 14456506420 0016677 0 ustar 00root root 0000000 0000000 from .base import TextRecord
class FolderStartRecord (TextRecord):
    # Marks the start of a data folder; the text payload is the
    # folder name (consumed via null_terminated_text by packed.py).
    pass


class FolderEndRecord (TextRecord):
    # Marks the end of the most recently started data folder.
    pass
igor2-0.5.3/igor2/record/history.py 0000664 0000000 0000000 00000000252 14456506420 0017121 0 ustar 00root root 0000000 0000000 from .base import TextRecord
class HistoryRecord (TextRecord):
    # Text record; presumably the history-area text (record type 2).
    pass


class RecreationRecord (TextRecord):
    # Text record; presumably recreation commands (record type 4).
    pass


class GetHistoryRecord (TextRecord):
    # Text record; semantics per Igor's PackedFile.h (record type 7).
    pass
igor2-0.5.3/igor2/record/packedfile.py 0000664 0000000 0000000 00000000105 14456506420 0017504 0 ustar 00root root 0000000 0000000 from .base import Record
class PackedFileRecord (Record):
    # Not parsed further; the payload stays in self.data as raw bytes.
    pass
igor2-0.5.3/igor2/record/procedure.py 0000664 0000000 0000000 00000000114 14456506420 0017405 0 ustar 00root root 0000000 0000000 from .base import TextRecord
class ProcedureRecord (TextRecord):
    # Text record; presumably procedure-window text (record type 5).
    pass
igor2-0.5.3/igor2/record/variables.py 0000664 0000000 0000000 00000030134 14456506420 0017372 0 ustar 00root root 0000000 0000000 import io as _io
import logging
from ..binarywave import TYPE_TABLE as _TYPE_TABLE
from ..binarywave import NullStaticStringField as _NullStaticStringField
from ..binarywave import DynamicStringField as _DynamicStringField
from ..struct import Structure as _Structure
from ..struct import DynamicStructure as _DynamicStructure
from ..struct import Field as _Field
from ..struct import DynamicField as _DynamicField
from ..util import byte_order as _byte_order
from ..util import need_to_reorder_bytes as _need_to_reorder_bytes
from .base import Record
logger = logging.getLogger(__name__)
class ListedStaticStringField(_NullStaticStringField):
    """Handle string conversions for multi-count dynamic parents.
    If a field belongs to a multi-count dynamic parent, the parent is
    called multiple times to parse each count, and the field's
    post-unpack hook gets called after the field is unpacked during
    each iteration. This requires alternative logic for getting and
    setting the string data. The actual string formatting code is not
    affected.
    """
    def post_unpack(self, parents, data):
        parent_structure = parents[-1]
        parent_data = self._get_structure_data(parents, data, parent_structure)
        # operate on the most recently unpacked count (the last entry)
        d = self._normalize_string(parent_data[-1][self.name])
        parent_data[-1][self.name] = d
class ListedDynamicStrDataField(_DynamicStringField, ListedStaticStringField):
    # string data whose length is given by the sibling 'strLen' field
    _size_field = 'strLen'
    _null_terminated = False

    def _get_size_data(self, parents, data):
        parent_structure = parents[-1]
        parent_data = self._get_structure_data(parents, data, parent_structure)
        # the size lives in the current (last) count of the listed parent
        return parent_data[-1][self._size_field]
class DynamicVarDataField(_DynamicField):
    """Base class converting an unpacked variable array into a dict."""
    def __init__(self, *args, **kwargs):
        # variable data fields are arrays by default
        if 'array' not in kwargs:
            kwargs['array'] = True
        super(DynamicVarDataField, self).__init__(*args, **kwargs)

    def pre_pack(self, parents, data):
        raise NotImplementedError()

    def post_unpack(self, parents, data):
        var_structure = parents[-1]
        var_data = self._get_structure_data(parents, data, var_structure)
        data = var_data[self.name]
        d = {}
        for i, value in enumerate(data):
            # subclasses decide how each entry maps to a (key, value)
            key, value = self._normalize_item(i, value)
            d[key] = value
        var_data[self.name] = d

    def _normalize_item(self, index, value):
        raise NotImplementedError()
class DynamicSysVarField(DynamicVarDataField):
    # System variables are unnamed on disk; synthesize names K0, K1, ...
    def _normalize_item(self, index, value):
        name = 'K{}'.format(index)
        return name, value
class DynamicUserVarField(DynamicVarDataField):
    # User numeric variables are keyed by their stored name; the value
    # is the (already normalized) 'num' entry.
    def _normalize_item(self, index, value):
        name = value['name']
        value = value['num']
        return name, value
class DynamicUserStrField(DynamicVarDataField):
    # User string variables are keyed by their stored name; the value
    # is the string's 'data' entry.
    def _normalize_item(self, index, value):
        name = value['name']
        value = value['data']
        return name, value
class DynamicVarNumField(_DynamicField):
    # Converts an unpacked VarNumRec into a plain numeric value.
    def post_unpack(self, parents, data):
        parent_structure = parents[-1]
        parent_data = self._get_structure_data(parents, data, parent_structure)
        # operate on the most recently unpacked count (the last entry)
        d = self._normalize_numeric_variable(parent_data[-1][self.name])
        parent_data[-1][self.name] = d

    def _normalize_numeric_variable(self, num_var):
        # look up the constructor for this Igor numeric type code
        t = _TYPE_TABLE[num_var['numType']]
        if num_var['numType'] % 2:  # complex number
            return t(complex(num_var['realPart'], num_var['imagPart']))
        else:
            return t(num_var['realPart'])
class DynamicFormulaField (_DynamicStringField):
    # dependency formula: a null-terminated string of 'formulaLen' bytes
    _size_field = 'formulaLen'
    _null_terminated = True
# On-disk record layouts, transcribed from Igor's Variables.h.

# From Variables.h
VarHeader1 = _Structure(  # `version` field pulled out into VariablesRecord
    name='VarHeader1',
    fields=[
        _Field(
            'h',
            'numSysVars',
            help='Number of system variables (K0, K1, ...).'),
        _Field('h', 'numUserVars',
               help='Number of user numeric variables -- may be zero.'),
        _Field('h', 'numUserStrs',
               help='Number of user string variables -- may be zero.'),
    ])


# From Variables.h
VarHeader2 = _Structure(  # `version` field pulled out into VariablesRecord
    name='VarHeader2',
    fields=[
        _Field(
            'h',
            'numSysVars',
            help='Number of system variables (K0, K1, ...).'),
        _Field('h', 'numUserVars',
               help='Number of user numeric variables -- may be zero.'),
        _Field('h', 'numUserStrs',
               help='Number of user string variables -- may be zero.'),
        _Field('h', 'numDependentVars',
               help='Number of dependent numeric variables -- may be zero.'),
        _Field('h', 'numDependentStrs',
               help='Number of dependent string variables -- may be zero.'),
    ])


# From Variables.h
UserStrVarRec1 = _DynamicStructure(
    name='UserStrVarRec1',
    fields=[
        ListedStaticStringField(
            'c', 'name', help='Name of the string variable.', count=32),
        _Field('h', 'strLen', help='The real size of the following array.'),
        ListedDynamicStrDataField('c', 'data'),
    ])


# From Variables.h
UserStrVarRec2 = _DynamicStructure(
    name='UserStrVarRec2',
    fields=[
        ListedStaticStringField(
            'c', 'name', help='Name of the string variable.', count=32),
        _Field('l', 'strLen', help='The real size of the following array.'),
        _Field('c', 'data'),
    ])


# From Variables.h
VarNumRec = _Structure(
    name='VarNumRec',
    fields=[
        _Field('h', 'numType', help='Type from binarywave.TYPE_TABLE'),
        _Field('d', 'realPart', help='The real part of the number.'),
        _Field('d', 'imagPart', help='The imag part if the number is '
                                     'complex.'),
        _Field('l', 'reserved', help='Reserved - set to zero.'),
    ])


# From Variables.h
UserNumVarRec = _DynamicStructure(
    name='UserNumVarRec',
    fields=[
        ListedStaticStringField(
            'c',
            'name',
            help='Name of the string variable.',
            count=32),
        _Field(
            'h',
            'type',
            help='0 = string, 1 = numeric.'),
        DynamicVarNumField(
            VarNumRec,
            'num',
            help='Type and value of the variable if it is numeric. '
                 'Not used for string.'),
    ])


# From Variables.h
UserDependentVarRec = _DynamicStructure(
    name='UserDependentVarRec',
    fields=[
        ListedStaticStringField(
            'c',
            'name',
            help='Name of the string variable.',
            count=32),
        _Field(
            'h',
            'type',
            help='0 = string, 1 = numeric.'),
        _Field(
            VarNumRec,
            'num',
            help='Type and value of the variable if it is numeric. '
                 'Not used for string.'),
        _Field(
            'h',
            'formulaLen',
            help='The length of the dependency formula.'),
        DynamicFormulaField(
            'c',
            'formula',
            help='Start of the dependency formula. A C string including '
                 'null terminator.'),
    ])
class DynamicVarHeaderField (_DynamicField):
    # Reads the variables header and resizes the dynamic array fields
    # (sysVars, userVars, userStrs, ...) to the counts it declares.
    def pre_pack(self, parents, data):
        raise NotImplementedError()

    def post_unpack(self, parents, data):
        var_structure = parents[-1]
        var_data = self._get_structure_data(
            parents, data, var_structure)
        # var_header_structure = self.format
        data = var_data['var_header']
        sys_vars_field = var_structure.get_field('sysVars')
        sys_vars_field.count = data['numSysVars']
        sys_vars_field.setup()
        user_vars_field = var_structure.get_field('userVars')
        user_vars_field.count = data['numUserVars']
        user_vars_field.setup()
        user_strs_field = var_structure.get_field('userStrs')
        user_strs_field.count = data['numUserStrs']
        user_strs_field.setup()
        # version-2 headers additionally carry dependent-variable counts
        if 'numDependentVars' in data:
            dependent_vars_field = var_structure.get_field('dependentVars')
            dependent_vars_field.count = data['numDependentVars']
            dependent_vars_field.setup()
            dependent_strs_field = var_structure.get_field('dependentStrs')
            dependent_strs_field.count = data['numDependentStrs']
            dependent_strs_field.setup()
        var_structure.setup()
# Version-1 variables record layout.
Variables1 = _DynamicStructure(
    name='Variables1',
    fields=[
        DynamicVarHeaderField(VarHeader1, 'var_header',
                              help='Variables header'),
        DynamicSysVarField('f', 'sysVars', help='System variables', count=0),
        DynamicUserVarField(UserNumVarRec, 'userVars',
                            help='User numeric variables', count=0),
        DynamicUserStrField(UserStrVarRec1, 'userStrs',
                            help='User string variables', count=0),
    ])


# Version-2 variables record layout (adds dependent variables/strings).
Variables2 = _DynamicStructure(
    name='Variables2',
    fields=[
        DynamicVarHeaderField(VarHeader2, 'var_header',
                              help='Variables header'),
        DynamicSysVarField('f', 'sysVars', help='System variables', count=0),
        DynamicUserVarField(UserNumVarRec, 'userVars',
                            help='User numeric variables', count=0),
        DynamicUserStrField(UserStrVarRec2, 'userStrs',
                            help='User string variables', count=0),
        _Field(UserDependentVarRec, 'dependentVars',
               help='Dependent numeric variables.', count=0, array=True),
        _Field(UserDependentVarRec, 'dependentStrs',
               help='Dependent string variables.', count=0, array=True),
    ])
class DynamicVersionField (_DynamicField):
    # Reads the leading `version` field of a variables record and
    # reconfigures the parent structure (byte order and record layout).
    def pre_pack(self, parents, byte_order):
        raise NotImplementedError()

    def post_unpack(self, parents, data):
        variables_structure = parents[-1]
        variables_data = self._get_structure_data(
            parents, data, variables_structure)
        version = variables_data['version']
        if variables_structure.byte_order in '@=':
            # native byte order requested: deduce the real order from
            # whether the version number looks byte-swapped
            need_to_reorder_bytes = _need_to_reorder_bytes(version)
            variables_structure.byte_order = _byte_order(need_to_reorder_bytes)
            logger.debug(
                'get byte order from version: {} (reorder? {})'.format(
                    variables_structure.byte_order, need_to_reorder_bytes))
        else:
            need_to_reorder_bytes = False
        old_format = variables_structure.fields[-1].format
        # select the layout matching the record version
        if version == 1:
            variables_structure.fields[-1].format = Variables1
        elif version == 2:
            variables_structure.fields[-1].format = Variables2
        elif not need_to_reorder_bytes:
            raise ValueError(
                'invalid variables record version: {}'.format(version))
        if variables_structure.fields[-1].format != old_format:
            logger.debug('change variables record from {} to {}'.format(
                old_format, variables_structure.fields[-1].format))
            variables_structure.setup()
        elif need_to_reorder_bytes:
            variables_structure.setup()
        # we might need to unpack again with the new byte order
        return need_to_reorder_bytes
# Top-level structure: `version` is read first, then DynamicVersionField
# swaps the `variables` field's format (Variables1/Variables2) to match.
VariablesRecordStructure = _DynamicStructure(
    name='VariablesRecord',
    fields=[
        DynamicVersionField(
            'h', 'version', help='Version number for this header.'),
        _Field(
            Variables1,
            'variables',
            help='The rest of the variables data.'),
    ])
class VariablesRecord (Record):
    """Record holding system/user variables; parsed on construction."""
    def __init__(self, *args, **kwargs):
        super(VariablesRecord, self).__init__(*args, **kwargs)
        # self.header['version']  # record version always 0?
        VariablesRecordStructure.byte_order = '='
        VariablesRecordStructure.setup()
        stream = _io.BytesIO(bytes(self.data))
        self.variables = VariablesRecordStructure.unpack_stream(stream)
        # flatten everything except the header into a name -> value map
        self.namespace = {}
        for key, value in self.variables['variables'].items():
            if key not in ['var_header']:
                logger.debug('update namespace {} with {} for {}'.format(
                    self.namespace, value, key))
                self.namespace.update(value)
igor2-0.5.3/igor2/record/wave.py 0000664 0000000 0000000 00000000522 14456506420 0016362 0 ustar 00root root 0000000 0000000 from io import BytesIO as _BytesIO
from ..binarywave import load as _loadibw
from . import Record
class WaveRecord (Record):
    """Record wrapping a single Igor binary wave."""
    def __init__(self, *args, **kwargs):
        super(WaveRecord, self).__init__(*args, **kwargs)
        # parse the payload as an in-memory .ibw file
        self.wave = _loadibw(_BytesIO(bytes(self.data)))

    def __str__(self):
        return str(self.wave)
igor2-0.5.3/igor2/struct.py 0000664 0000000 0000000 00000070426 14456506420 0015500 0 ustar 00root root 0000000 0000000 """Structure and Field classes for declaring structures
There are a few formats that can be used to represent the same data, a
binary packed format with all the data in a buffer, a linearized
format with each field in a single Python list, and a nested format
with each field in a hierarchy of Python dictionaries.
"""
from __future__ import absolute_import
import io as _io
import logging
import pprint as _pprint
import struct as _struct
import numpy as _numpy
logger = logging.getLogger(__name__)
class Field (object):
    """Represent a Structure field.

    The format argument can be a format character from the ``struct``
    documentation (e.g., ``c`` for ``char``, ``h`` for ``short``, ...)
    or ``Structure`` instance (for building nested structures).

    Examples
    --------
    >>> from pprint import pprint
    >>> import numpy

    Example of an unsigned short integer field:

    >>> time = Field(
    ...     'I', 'time', default=0, help='POSIX time')
    >>> time.arg_count
    1
    >>> list(time.pack_data(1))
    [1]
    >>> list(time.pack_item(2))
    [2]
    >>> time.unpack_data([3])
    3
    >>> time.unpack_item([4])
    4

    Example of a multi-dimensional float field:

    >>> data = Field(
    ...     'f', 'data', help='example data', count=(2,3,4), array=True)
    >>> data.arg_count
    24
    >>> list(data.indexes())  # doctest: +ELLIPSIS
    [[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 0, 3], [0, 1, 0], ..., [1, 2, 3]]
    >>> list(data.pack_data(
    ...     [[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]],
    ...      [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]])
    ...     )  # doctest: +ELLIPSIS
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, ..., 19, 20, 21, 22, 23]
    >>> list(data.pack_item(3))
    [3]
    >>> data.unpack_data(range(data.arg_count))
    array([[[ 0,  1,  2,  3],
            [ 4,  5,  6,  7],
            [ 8,  9, 10, 11]],
    <BLANKLINE>
           [[12, 13, 14, 15],
            [16, 17, 18, 19],
            [20, 21, 22, 23]]])
    >>> data.unpack_item([3])
    3

    Example of a nested structure field:

    >>> run = Structure('run', fields=[time, data])
    >>> runs = Field(run, 'runs', help='pair of runs', count=2, array=True)
    >>> runs.arg_count  # = 2 * (1 + 24)
    50
    >>> data1 = numpy.arange(data.arg_count).reshape(data.count)
    >>> data2 = data1 + data.arg_count
    >>> list(runs.pack_data(
    ...     [{'time': 100, 'data': data1},
    ...      {'time': 101, 'data': data2}])
    ...     )  # doctest: +ELLIPSIS
    [100, 0, 1, 2, ..., 22, 23, 101, 24, 25, ..., 46, 47]
    >>> list(runs.pack_item({'time': 100, 'data': data1})
    ...     )  # doctest: +ELLIPSIS
    [100, 0, 1, 2, ..., 22, 23]
    >>> pprint(runs.unpack_data(range(runs.arg_count)))
    [{'data': array([[[ 1,  2,  3,  4],
            [ 5,  6,  7,  8],
            [ 9, 10, 11, 12]],
    <BLANKLINE>
           [[13, 14, 15, 16],
            [17, 18, 19, 20],
            [21, 22, 23, 24]]]),
      'time': 0},
     {'data': array([[[26, 27, 28, 29],
            [30, 31, 32, 33],
            [34, 35, 36, 37]],
    <BLANKLINE>
           [[38, 39, 40, 41],
            [42, 43, 44, 45],
            [46, 47, 48, 49]]]),
      'time': 25}]
    >>> pprint(runs.unpack_item(range(runs.structure_count)))
    {'data': array([[[ 1,  2,  3,  4],
            [ 5,  6,  7,  8],
            [ 9, 10, 11, 12]],
    <BLANKLINE>
           [[13, 14, 15, 16],
            [17, 18, 19, 20],
            [21, 22, 23, 24]]]),
     'time': 0}

    If you don't give enough values for an array field, the remaining
    values are filled in with their defaults.

    >>> list(data.pack_data(
    ...     [[[0, 1, 2, 3], [4, 5, 6]], [[10]]]))  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: no default for <Field data ...>
    >>> data.default = 0
    >>> list(data.pack_data(
    ...     [[[0, 1, 2, 3], [4, 5, 6]], [[10]]]))
    [0, 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]

    See Also
    --------
    Structure
    """
    def __init__(self, format, name, default=None, help=None, count=1,
                 array=False):
        self.format = format
        self.name = name
        self.default = default
        self.help = help
        self.count = count
        self.array = array
        self.setup()

    def setup(self):
        """Setup any dynamic properties of a field.

        Use this method to recalculate dynamic properties after
        changing the basic properties set during initialization.
        """
        logger.debug('setup {}'.format(self))
        # number of item repeats; `count` may be an int or a shape tuple
        self.item_count = _numpy.prod(self.count)
        if not self.array and self.item_count != 1:
            raise ValueError(
                '{} must be an array field to have a count of {}'.format(
                    self, self.count))
        if isinstance(self.format, Structure):
            # nested structure: flat args per item is the sum over subfields
            self.structure_count = sum(
                f.arg_count for f in self.format.fields)
            self.arg_count = self.item_count * self.structure_count
        elif self.format == 'x':
            self.arg_count = 0  # no data in padding bytes
        else:
            self.arg_count = self.item_count  # struct.Struct format args

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return '<{} {} {}>'.format(
            self.__class__.__name__, self.name, id(self))

    def indexes(self):
        """Iterate through indexes to a possibly multi-dimensional array"""
        assert self.array, self
        try:
            i = [0] * len(self.count)
        except TypeError:  # non-iterable count
            for i in range(self.count):
                yield i
        else:
            # decompose the flat index into one index per dimension,
            # last dimension varying fastest
            for i in range(self.item_count):
                index = []
                for j, c in enumerate(reversed(self.count)):
                    index.insert(0, i % c)
                    i //= c
                yield index

    def pack_data(self, data=None):
        """Linearize a single field's data to a flat list.

        If the field is repeated (count > 1), the incoming data should
        be iterable with each iteration returning a single item.
        """
        if self.array:
            if data is None:
                data = []
            if hasattr(data, 'flat'):  # take advantage of numpy's ndarray.flat
                items = 0
                for item in data.flat:
                    items += 1
                    for arg in self.pack_item(item):
                        yield arg
                if items < self.item_count:
                    raise NotImplementedError(
                        "You have reached a bad state! Please copy the "
                        "code and data you used to get here and paste it at "
                        "https://github.com/AFM-analysis/igor2/issues/5")
                    # https://github.com/AFM-analysis/igor2/issues/5
                    # Original code from W. Trevor King:
                    # if f.default is None:
                    #     raise ValueError(
                    #         'no default for {}.{}'.format(self, f))
                    # for i in range(self.item_count - items):
                    #     yield f.default
            else:
                for index in self.indexes():
                    try:
                        if isinstance(index, int):
                            item = data[index]
                        else:
                            # walk the nested lists one dimension at a time
                            item = data
                            for i in index:
                                item = item[i]
                    except IndexError:
                        # missing entries fall back to the field default
                        item = None
                    for arg in self.pack_item(item):
                        yield arg
        else:
            for arg in self.pack_item(data):
                yield arg

    def pack_item(self, item=None):
        """Linearize a single count of the field's data to a flat iterable
        """
        if isinstance(self.format, Structure):
            for i in self.format._pack_item(item):
                yield i
        elif item is None:
            if self.default is None:
                raise ValueError('no default for {}'.format(self))
            yield self.default
        else:
            yield item

    def unpack_data(self, data):
        """Inverse of .pack_data"""
        logger.debug('unpack {} for {} {}'.format(data, self, self.format))
        iterator = iter(data)
        try:
            items = [next(iterator) for i in range(self.arg_count)]
        except StopIteration:
            raise ValueError('not enough data to unpack {}'.format(self))
        try:
            next(iterator)
        except StopIteration:
            pass
        else:
            raise ValueError('too much data to unpack {}'.format(self))
        if isinstance(self.format, Structure):
            # break into per-structure clumps
            s = self.structure_count
            items = zip(*[items[i::s] for i in range(s)])
        else:
            items = [[i] for i in items]
        unpacked = [self.unpack_item(i) for i in items]
        if self.arg_count:
            count = self.count
        else:
            count = 0  # padding bytes, etc.
        if not self.array:
            assert count == 1, (self, self.count)
            return unpacked[0]
        if isinstance(self.format, Structure):
            # multi-dimensional arrays of structures are not supported
            try:
                len(self.count)
            except TypeError:
                pass
            else:
                raise NotImplementedError('reshape Structure field')
        else:
            unpacked = _numpy.array(unpacked)
            logger.debug('reshape {} data from {} to {}'.format(
                self, unpacked.shape, count))
            unpacked = unpacked.reshape(count)
        return unpacked

    def unpack_item(self, item):
        """Inverse of .pack_item"""
        if isinstance(self.format, Structure):
            return self.format._unpack_item(item)
        else:
            assert len(item) == 1, item
            return item[0]
class DynamicField (Field):
    """Represent a DynamicStructure field with a dynamic definition.

    Adds the methods ``.pre_pack``, ``pre_unpack``, and
    ``post_unpack``, all of which are called when a ``DynamicField``
    is used by a ``DynamicStructure``.  Each method takes the
    arguments ``(parents, data)``, where ``parents`` is a list of
    ``DynamicStructure``\\s that own the field and ``data`` is a dict
    hierarchy of the structure data.

    See the ``DynamicStructure`` docstring for the exact timing of the
    method calls.

    See Also
    --------
    Field, DynamicStructure
    """
    def pre_pack(self, parents, data):
        "Prepare to pack."
        pass

    def pre_unpack(self, parents, data):
        "React to previously unpacked data"
        pass

    def post_unpack(self, parents, data):
        "React to our own data"
        pass

    def _get_structure_data(self, parents, data, structure):
        """Extract the data belonging to a particular ancestor structure.

        Walk down the ``parents`` chain from the root, descending into
        the matching sub-dictionary of ``data`` at each step, until
        ``structure`` is reached.
        """
        d = data
        s = parents[0]
        if s == structure:
            return d
        for p in parents[1:]:
            # find the field of `s` whose format is the next parent `p`
            for f in s.fields:
                if f.format == p:
                    s = p
                    d = d[f.name]
                    break
            assert s == p, (s, p)
            if p == structure:
                break
        return d
class Structure (_struct.Struct):
    r"""Represent a C structure.

    A convenient wrapper around struct.Struct that uses Fields and
    adds dict-handling methods for transparent name assignment.

    See Also
    --------
    Field

    Examples
    --------
    >>> import array
    >>> from pprint import pprint

    Represent the C structures::

        struct run {
          unsigned int time;
          short data[2][3];
        };

        struct experiment {
          unsigned short version;
          struct run runs[2];
        };

    As:

    >>> time = Field('I', 'time', default=0, help='POSIX time')
    >>> data = Field(
    ...     'h', 'data', default=0, help='example data', count=(2,3),
    ...     array=True)
    >>> run = Structure('run', fields=[time, data])
    >>> version = Field(
    ...     'H', 'version', default=1, help='example version')
    >>> runs = Field(run, 'runs', help='pair of runs', count=2, array=True)
    >>> experiment = Structure('experiment', fields=[version, runs])

    The structures automatically calculate the flattened data format:

    >>> run.format
    '@Ihhhhhh'
    >>> run.size  # 4 + 2*3*2
    16
    >>> experiment.format
    '@HIhhhhhhIhhhhhh'
    >>> experiment.size  # 2 + 2 + 2*(4 + 2*3*2)
    36

    The first two elements in the above size calculation are 2 (for
    the unsigned short, 'H') and 2 (padding so the unsigned int aligns
    with a 4-byte block).  If you select a byte ordering that doesn't
    mess with alignment and recalculate the format, the padding goes
    away and you get:

    >>> experiment.set_byte_order('>')
    >>> experiment.get_format()
    '>HIhhhhhhIhhhhhh'
    >>> experiment.size
    34

    You can read data out of any object supporting the buffer
    interface:

    >>> b = array.array('B', range(experiment.size))
    >>> d = experiment.unpack_from(buffer=b)
    >>> pprint(d)
    {'runs': [{'data': array([[1543, 2057, 2571],
           [3085, 3599, 4113]]),
               'time': 33752069},
              {'data': array([[5655, 6169, 6683],
           [7197, 7711, 8225]]),
               'time': 303240213}],
     'version': 1}
    >>> [hex(x) for x in d['runs'][0]['data'].flat]
    ['0x607', '0x809', '0xa0b', '0xc0d', '0xe0f', '0x1011']

    You can also read out from strings:

    >>> d = experiment.unpack(b.tobytes())
    >>> pprint(d)
    {'runs': [{'data': array([[1543, 2057, 2571],
           [3085, 3599, 4113]]),
               'time': 33752069},
              {'data': array([[5655, 6169, 6683],
           [7197, 7711, 8225]]),
               'time': 303240213}],
     'version': 1}

    If you don't give enough values for an array field, the remaining
    values are filled in with their defaults.

    >>> experiment.pack_into(buffer=b, data=d)
    >>> b.tobytes()[:17]
    b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10'
    >>> b.tobytes()[17:]
    b'\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !'
    >>> run0 = d['runs'].pop(0)
    >>> b = experiment.pack(data=d)
    >>> b[:17]
    b'\x00\x01\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f '
    >>> b[17:]
    b'!\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'

    If you set ``count=0``, the field is ignored.

    >>> experiment2 = Structure('experiment', fields=[
    ...     version, Field('f', 'ignored', count=0, array=True), runs],
    ...     byte_order='>')
    >>> experiment2.format
    '>HIhhhhhhIhhhhhh'
    >>> d = experiment2.unpack(b)
    >>> pprint(d)
    {'ignored': array([], dtype=float64),
     'runs': [{'data': array([[5655, 6169, 6683],
           [7197, 7711, 8225]]),
               'time': 303240213},
              {'data': array([[0, 0, 0],
           [0, 0, 0]]), 'time': 0}],
     'version': 1}
    >>> del d['ignored']
    >>> b2 = experiment2.pack(d)
    >>> b2 == b
    True
    """
    _byte_order_symbols = '@=<>!'

    def __init__(self, name, fields, byte_order='@'):
        # '=' for native byte order, standard size and alignment
        # See http://docs.python.org/library/struct for details
        self.name = name
        self.fields = fields
        self.byte_order = byte_order
        self.setup()

    def __str__(self):
        return self.name

    def __repr__(self):
        return '<{} {} {}>'.format(
            self.__class__.__name__, self.name, id(self))

    def setup(self):
        """Setup any dynamic properties of a structure.

        Use this method to recalculate dynamic properties after
        changing the basic properties set during initialization.
        """
        logger.debug('setup {!r}'.format(self))
        self.set_byte_order(self.byte_order)
        self.get_format()

    def set_byte_order(self, byte_order):
        """Allow changing the format byte_order on the fly.
        """
        logger.debug('set byte order for {!r} to {}'.format(self, byte_order))
        self.byte_order = byte_order
        # propagate to nested structures so their formats stay consistent
        for field in self.fields:
            if isinstance(field.format, Structure):
                field.format.set_byte_order(byte_order)

    def get_format(self):
        """Build the flattened format string and (re)initialize the
        underlying ``struct.Struct`` with it.
        """
        format = self.byte_order + ''.join(self.sub_format())
        # P format only allowed for native byte ordering
        # Convert P to I for ILP32 compatibility when running on a LP64.
        format = format.replace('P', 'I')
        try:
            super(Structure, self).__init__(format=format)
        except _struct.error as e:
            raise ValueError((e, format)) from e
        return format

    def sub_format(self):
        """Yield the format characters for every field, recursing into
        nested structures (no byte-order prefix)."""
        logger.debug('calculate sub-format for {!r}'.format(self))
        for field in self.fields:
            if isinstance(field.format, Structure):
                field_format = list(
                    field.format.sub_format()) * field.item_count
            else:
                field_format = [field.format] * field.item_count
            for fmt in field_format:
                yield fmt

    def _pack_item(self, item=None):
        """Linearize a single count of the structure's data to a flat iterable
        """
        if item is None:
            item = {}
        for f in self.fields:
            try:
                data = item[f.name]
            except TypeError:
                raise ValueError((f.name, item))
            except KeyError:
                data = None  # fall back to the field's default
            for arg in f.pack_data(data):
                yield arg

    def _unpack_item(self, args):
        """Inverse of ._pack_item"""
        data = {}
        iterator = iter(args)
        for f in self.fields:
            try:
                items = [next(iterator) for i in range(f.arg_count)]
            except StopIteration:
                raise ValueError('not enough data to unpack {}.{}'.format(
                    self, f))
            data[f.name] = f.unpack_data(items)
        try:
            next(iterator)
        except StopIteration:
            pass
        else:
            raise ValueError('too much data to unpack {}'.format(self))
        return data

    def pack(self, data):
        """Pack a data dict into bytes.

        Raises ValueError (chained to the underlying struct error) when
        the linearized arguments do not match the format.
        """
        args = list(self._pack_item(data))
        try:
            return super(Structure, self).pack(*args)
        except Exception as e:
            # chain the original error instead of swallowing it
            raise ValueError(self.format) from e

    def pack_into(self, buffer, offset=0, data=None):
        """Pack a data dict into a writable buffer at ``offset``.

        ``data`` defaults to ``None`` (treated as an empty dict) to
        avoid a shared mutable default argument.
        """
        args = list(self._pack_item(data))
        return super(Structure, self).pack_into(
            buffer, offset, *args)

    def unpack(self, *args, **kwargs):
        """Unpack bytes into a data dict (inverse of .pack)."""
        args = super(Structure, self).unpack(*args, **kwargs)
        return self._unpack_item(args)

    def unpack_from(self, buffer, offset=0, *args, **kwargs):
        """Unpack a data dict from ``buffer`` starting at ``offset``."""
        logger.debug(
            'unpack {!r} for {!r} ({}, offset={}) with {} ({})'.format(
                buffer, self, len(buffer), offset, self.format, self.size))
        args = super(Structure, self).unpack_from(
            buffer, offset, *args, **kwargs)
        return self._unpack_item(args)

    def get_field(self, name):
        """Return the first field named ``name`` (IndexError if absent)."""
        return [f for f in self.fields if f.name == name][0]
class DebuggingStream (object):
    """Wrap a readable stream, logging every ``read`` call and result."""
    def __init__(self, stream):
        self.stream = stream

    def read(self, size):
        # delegate to the wrapped stream, then log what came back
        data = self.stream.read(size)
        logger.debug('read {} from {}: ({}) {!r}'.format(
            size, self.stream, len(data), data))
        return data
class DynamicStructure (Structure):
    r"""Represent a C structure field with a dynamic definition.

    Any dynamic fields have their ``.pre_pack`` called before any
    structure packing is done.  ``.pre_unpack`` is called for a
    particular field just before that field's ``.unpack_data`` call.
    ``.post_unpack`` is called for a particular field just after
    ``.unpack_data``.  If ``.post_unpack`` returns ``True``, the same
    field is unpacked again.

    Examples
    --------
    >>> from pprint import pprint

    This allows you to define structures where some portion of the
    global structure depends on earlier data.  For example, in the
    quasi-C structure::

        struct vector {
          unsigned int length;
          short data[length];
        };

    You can generate a Python version of this structure in two ways,
    with a dynamic ``length``, or with a dynamic ``data``.  In both
    cases, the required methods are the same, the only difference is
    where you attach them.

    >>> def packer(self, parents, data):
    ...     vector_structure = parents[-1]
    ...     vector_data = self._get_structure_data(
    ...         parents, data, vector_structure)
    ...     length = len(vector_data['data'])
    ...     vector_data['length'] = length
    ...     data_field = vector_structure.get_field('data')
    ...     data_field.count = length
    ...     data_field.setup()
    >>> def unpacker(self, parents, data):
    ...     vector_structure = parents[-1]
    ...     vector_data = self._get_structure_data(
    ...         parents, data, vector_structure)
    ...     length = vector_data['length']
    ...     data_field = vector_structure.get_field('data')
    ...     data_field.count = length
    ...     data_field.setup()
    >>> class DynamicLengthField (DynamicField):
    ...     def pre_pack(self, parents, data):
    ...         packer(self, parents, data)
    ...     def post_unpack(self, parents, data):
    ...         unpacker(self, parents, data)
    >>> dynamic_length_vector = DynamicStructure('vector',
    ...     fields=[
    ...         DynamicLengthField('I', 'length'),
    ...         Field('h', 'data', count=0, array=True),
    ...     ],
    ...     byte_order='>')
    >>> class DynamicDataField (DynamicField):
    ...     def pre_pack(self, parents, data):
    ...         packer(self, parents, data)
    ...     def pre_unpack(self, parents, data):
    ...         unpacker(self, parents, data)
    >>> dynamic_data_vector = DynamicStructure('vector',
    ...     fields=[
    ...         Field('I', 'length'),
    ...         DynamicDataField('h', 'data', count=0, array=True),
    ...     ],
    ...     byte_order='>')
    >>> b = b'\x00\x00\x00\x02\x01\x02\x03\x04'
    >>> d = dynamic_length_vector.unpack(b)
    >>> pprint(d)
    {'data': array([258, 772]), 'length': 2}
    >>> d = dynamic_data_vector.unpack(b)
    >>> pprint(d)
    {'data': array([258, 772]), 'length': 2}
    >>> d['data'] = [1,2,3,4]
    >>> dynamic_length_vector.pack(d)
    b'\x00\x00\x00\x04\x00\x01\x00\x02\x00\x03\x00\x04'
    >>> dynamic_data_vector.pack(d)
    b'\x00\x00\x00\x04\x00\x01\x00\x02\x00\x03\x00\x04'

    The implementation is a good deal more complicated than the one
    for ``Structure``, because we must make multiple calls to
    ``struct.Struct.unpack`` to unpack the data.
    """
    # def __init__(self, *args, **kwargs):
    #     pass  # self.parent = ..

    def _pre_pack(self, parents=None, data=None):
        """Give every dynamic field a chance to adjust itself (and the
        data) before the structure format is finalized for packing."""
        if parents is None:
            parents = [self]
        else:
            parents = parents + [self]
        for f in self.fields:
            if hasattr(f, 'pre_pack'):
                logger.debug('pre-pack {}'.format(f))
                f.pre_pack(parents=parents, data=data)
            if isinstance(f.format, DynamicStructure):
                logger.debug('pre-pack {!r}'.format(f.format))
                # recurse into the nested dynamic structure (the
                # Field itself has no _pre_pack method)
                f.format._pre_pack(parents=parents, data=data)

    def pack(self, data):
        self._pre_pack(data=data)
        self.setup()
        return super(DynamicStructure, self).pack(data)

    def pack_into(self, buffer, offset=0, data=None):
        # ``data=None`` (treated as an empty dict) avoids a shared
        # mutable default argument.
        if data is None:
            data = {}
        self._pre_pack(data=data)
        self.setup()
        return super(DynamicStructure, self).pack_into(
            buffer=buffer, offset=offset, data=data)

    def unpack_stream(self, stream, parents=None, data=None, d=None):
        """Unpack this structure from ``stream``, honoring the dynamic
        field hooks (``pre_unpack``/``post_unpack``/``unpack``).

        `d` is the working data dictionary for this structure; `data`
        is the root dictionary of the whole unpack.
        """
        if data is None:
            # top-level call: this structure is the root
            parents = [self]
            data = d = {}
            if logger.level <= logging.DEBUG:
                stream = DebuggingStream(stream)
        else:
            parents = parents + [self]
        for f in self.fields:
            logger.debug('parsing {!r}.{} (count={}, item_count={})'.format(
                self, f, f.count, f.item_count))
            if logger.level <= logging.DEBUG:
                logger.debug('data:\n{}'.format(_pprint.pformat(data)))
            if hasattr(f, 'pre_unpack'):
                logger.debug('pre-unpack {}'.format(f))
                f.pre_unpack(parents=parents, data=data)
            if hasattr(f, 'unpack'):  # override default unpacking
                logger.debug('override unpack for {}'.format(f))
                d[f.name] = f.unpack(stream)
                continue
            # setup for unpacking loop
            if isinstance(f.format, Structure):
                f.format.set_byte_order(self.byte_order)
                f.setup()
                f.format.setup()
            if isinstance(f.format, DynamicStructure):
                # nested dynamic structures unpack themselves recursively
                if f.array:
                    d[f.name] = []
                    for i in range(f.item_count):
                        x = {}
                        d[f.name].append(x)
                        f.format.unpack_stream(
                            stream, parents=parents, data=data, d=x)
                else:
                    assert f.item_count == 1, (f, f.count)
                    d[f.name] = {}
                    f.format.unpack_stream(
                        stream, parents=parents, data=data, d=d[f.name])
                if hasattr(f, 'post_unpack'):
                    logger.debug('post-unpack {}'.format(f))
                    repeat = f.post_unpack(parents=parents, data=data)
                    if repeat:
                        raise NotImplementedError(
                            'cannot repeat unpack for dynamic structures')
                continue
            if isinstance(f.format, Structure):
                # static nested structure: read the raw bytes now, parse
                # them (possibly repeatedly) in unpack() below
                logger.debug('parsing {} bytes for {}'.format(
                    f.format.size, f.format.format))
                bs = [stream.read(f.format.size) for i in range(f.item_count)]

                def unpack():
                    f.format.set_byte_order(self.byte_order)
                    f.setup()
                    f.format.setup()
                    x = [f.format.unpack_from(b) for b in bs]
                    if not f.array:
                        assert len(x) == 1, (f, f.count, x)
                        x = x[0]
                    return x
            else:
                # plain struct format character(s): read the raw bytes
                # for the preliminary format, parse in unpack() below
                field_format = self.byte_order + f.format * f.item_count
                field_format = field_format.replace('P', 'I')
                try:
                    size = _struct.calcsize(field_format)
                except _struct.error as e:
                    logger.error(e)
                    logger.error('{}.{}: {}'.format(self, f, field_format))
                    raise
                logger.debug('parsing {} bytes for preliminary {}'.format(
                    size, field_format))
                raw = stream.read(size)
                if len(raw) < size:
                    raise ValueError(
                        'not enough data to unpack {}.{} ({} < {})'.format(
                            self, f, len(raw), size))

                def unpack():
                    # recompute the format: post_unpack may have changed
                    # the field definition since the bytes were read
                    field_format = self.byte_order + f.format * f.item_count
                    field_format = field_format.replace('P', 'I')
                    logger.debug('parse previous bytes using {}'.format(
                        field_format))
                    struct = _struct.Struct(field_format)
                    items = struct.unpack(raw)
                    return f.unpack_data(items)
            # unpacking loop: re-unpack while post_unpack asks for it
            repeat = True
            while repeat:
                d[f.name] = unpack()
                if hasattr(f, 'post_unpack'):
                    logger.debug('post-unpack {}'.format(f))
                    repeat = f.post_unpack(parents=parents, data=data)
                else:
                    repeat = False
                if repeat:
                    logger.debug('repeat unpack for {}'.format(f))
        return data

    def unpack(self, string):
        """Unpack a complete data dict from a byte string."""
        stream = _io.BytesIO(string)
        return self.unpack_stream(stream)

    def unpack_from(self, buffer, offset=0, *args, **kwargs):
        # bypass Structure.unpack_from's logging wrapper and use the
        # plain struct.Struct implementation on the flattened format
        args = super(Structure, self).unpack_from(
            buffer, offset, *args, **kwargs)
        return self._unpack_item(args)
igor2-0.5.3/igor2/util.py 0000664 0000000 0000000 00000007533 14456506420 0015130 0 ustar 00root root 0000000 0000000 # Copyright (C) 2012 W. Trevor King
#
# This file is part of igor.
#
# igor is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# igor is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with igor. If not, see .
"Utility functions for handling buffers"
import sys as _sys
import numpy as _numpy
def _ord(byte):
r"""Convert a byte to an integer.
>>> buffer = b'\x00\x01\x02'
>>> [_ord(b) for b in buffer]
[0, 1, 2]
"""
if _sys.version_info >= (3,):
return byte
else:
return ord(byte)
def hex_bytes(buffer, spaces=None):
    r"""Pretty-printing for binary buffers.

    With ``spaces=n`` a single space is emitted after every ``n``-th
    byte (so a buffer whose length is an exact multiple of ``n`` keeps
    its historical trailing space, except for ``spaces=1``).

    >>> hex_bytes(b'\x00\x01\x02\x03\x04')
    '0001020304'
    >>> hex_bytes(b'\x00\x01\x02\x03\x04', spaces=1)
    '00 01 02 03 04'
    >>> hex_bytes(b'\x00\x01\x02\x03\x04', spaces=2)
    '0001 0203 04'
    >>> hex_bytes(b'\x00\x01\x02\x03\x04\x05\x06', spaces=2)
    '0001 0203 0405 06'
    >>> hex_bytes(b'\x00\x01\x02\x03\x04\x05\x06', spaces=3)
    '000102 030405 06'
    """
    # bytes iteration yields ints directly on Python 3; no _ord() shim
    octets = ['{:02x}'.format(x) for x in buffer]
    if spaces is None:
        return ''.join(octets)
    elif spaces == 1:
        return ' '.join(octets)
    # Linear construction instead of the former O(n^2) list.insert()
    # loop; output is identical, including the trailing space when
    # len(buffer) is a multiple of `spaces`.
    pieces = []
    for i, octet in enumerate(octets, start=1):
        pieces.append(octet)
        if i % spaces == 0:
            pieces.append(' ')
    return ''.join(pieces)
def assert_null(buffer, strict=True):
    r"""Ensure an input buffer is entirely zero.

    >>> import sys
    >>> assert_null(b'')
    >>> assert_null(b'\x00\x00')
    >>> assert_null(b'\x00\x01\x02\x03')
    Traceback (most recent call last):
    ...
    ValueError: 00 01 02 03
    >>> stderr = sys.stderr
    >>> sys.stderr = sys.stdout
    >>> assert_null(b'\x00\x01\x02\x03', strict=False)
    warning: post-data padding not zero: 00 01 02 03
    >>> sys.stderr = stderr
    """
    # An all-zero (or empty) buffer has a maximum byte value of zero.
    if not buffer or _ord(max(buffer)) == 0:
        return
    hex_string = hex_bytes(buffer, spaces=1)
    if strict:
        raise ValueError(hex_string)
    _sys.stderr.write(
        'warning: post-data padding not zero: {}\n'.format(hex_string))
# From ReadWave.c
def byte_order(needToReorderBytes):
    """Return the struct byte-order character for the file's data.

    The result is ``'<'`` (little-endian) or ``'>'`` (big-endian): the
    host machine's byte order, flipped when ``needToReorderBytes`` is
    true.
    """
    host_is_little = _sys.byteorder == 'little'
    file_is_little = host_is_little != bool(needToReorderBytes)
    return '<' if file_is_little else '>'
# From ReadWave.c
def need_to_reorder_bytes(version):
    """Return True when the file's data must be byte-swapped.

    If the low order byte of the version field of the BinHeader
    structure is zero then the file is from a platform that uses
    different byte-ordering and therefore all data will need to be
    reordered.
    """
    return (version & 0xFF) == 0
# From ReadWave.c
def checksum(buffer, byte_order, oldcksum, numbytes):
    """Sum ``numbytes`` of ``buffer`` as 16-bit integers, Igor-style.

    Parameters
    ----------
    buffer : buffer-like
        Raw bytes to sum.
    byte_order : str
        '<' or '>' prefix for the 16-bit integer dtype.
    oldcksum : int
        Running checksum to add onto.
    numbytes : int
        Number of bytes to include (a trailing odd byte is ignored).
    """
    x = _numpy.ndarray(
        # Integer (floor) division: ``numbytes / 2`` is a float on
        # Python 3, which numpy rejects as a shape dimension.
        (numbytes // 2,),  # 2 bytes to a short -- ignore trailing odd byte
        dtype=_numpy.dtype(byte_order + 'h'),
        buffer=buffer)
    # cast to a Python int so the accumulator cannot wrap in int64
    oldcksum += int(x.sum())
    if oldcksum > 2 ** 31:  # fake the C implementation's int rollover
        oldcksum %= 2 ** 32
        if oldcksum > 2 ** 31:
            oldcksum -= 2 ** 31
    return oldcksum & 0xffff
def _bytes(obj, encoding='utf-8'):
"""Convert bytes or strings into bytes
>>> _bytes(b'123')
'123'
>>> _bytes('123')
'123'
"""
if _sys.version_info >= (3,):
if isinstance(obj, bytes):
return obj
else:
return bytes(obj, encoding)
else:
return bytes(obj)
igor2-0.5.3/pyproject.toml 0000664 0000000 0000000 00000002203 14456506420 0015460 0 ustar 00root root 0000000 0000000 [build-system]
requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2"]
build-backend = "setuptools.build_meta"
[project]
name = "igor2"
authors = [
{name = "Paul Kienzle"},
{name = "W. Trevor King"},
{name = "Aurelien Jaquier"},
{name = "Zbigniew Jędrzejewski-Szmek"},
{name = "Paul Müller"},
]
description = "interface for reading binary IGOR files"
readme = "Readme.rst"
requires-python = ">=3.8, <4"  # NOTE(review): numpy>=1.25.1 requires Python >=3.9 and CI only tests 3.9-3.11 -- consider raising to ">=3.9"
classifiers = [
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)',
'Programming Language :: Python :: 3',
]
license = {text = "GNU Lesser General Public License v3 or later (LGPLv3+)"}
dependencies = [
"numpy>=1.25.1",
]
dynamic = ["version"]
[project.optional-dependencies]
CLI = [
"matplotlib",
]
[project.scripts]
igorbinarywave = 'igor2.cli:igorbinarywave'
igorpackedexperiment = 'igor2.cli:igorpackedexperiment'
[tool.setuptools]
packages = ["igor2"]
[tool.setuptools_scm]
write_to = "igor2/_version.py"
version_scheme = "post-release"
igor2-0.5.3/tests/ 0000775 0000000 0000000 00000000000 14456506420 0013711 5 ustar 00root root 0000000 0000000 igor2-0.5.3/tests/data/ 0000775 0000000 0000000 00000000000 14456506420 0014622 5 ustar 00root root 0000000 0000000 igor2-0.5.3/tests/data/README 0000664 0000000 0000000 00000000234 14456506420 0015501 0 ustar 00root root 0000000 0000000 .ibw samples are from TN003.zip.
polar-graphs-demo.pxp was distributed with IGOR Pro 5.04 as
Examples/Graphing Techniques/Obsolete/Polar Graphs Demo.pxp
igor2-0.5.3/tests/data/mac-double.ibw 0000664 0000000 0000000 00000000266 14456506420 0017341 0 ustar 00root root 0000000 0000000 b1 double ? 蘂 蘂 @ @ @ @ ? #B igor2-0.5.3/tests/data/mac-textWave.ibw 0000664 0000000 0000000 00000000646 14456506420 0017700 0 ustar 00root root 0000000 0000000 R WW text0 %R ? ? ? ? %\0Maryhadalittlelamb igor2-0.5.3/tests/data/mac-version2.ibw 0000664 0000000 0000000 00000000261 14456506420 0017631 0 ustar 00root root 0000000 0000000 ] version2 ? x ` @ @ @@ @ ? &2 This is a test. igor2-0.5.3/tests/data/mac-version3Dependent.ibw 0000664 0000000 0000000 00000000226 14456506420 0021462 0 ustar 00root root 0000000 0000000 ~ version3Dependent
? ) K0 igor2-0.5.3/tests/data/mac-version5.ibw 0000664 0000000 0000000 00000000743 14456506420 0017641 0 ustar 00root root 0000000 0000000 T @ %TyT` version5 %R ? ? ? ? %O %Op %Pt @ @ @@ @ ? This is a test. Column0 igor2-0.5.3/tests/data/mac-zeroPointWave.ibw 0000664 0000000 0000000 00000000600 14456506420 0020673 0 ustar 00root root 0000000 0000000 @ bLbL zeroWave %R ? ? ? ? igor2-0.5.3/tests/data/polar-graphs-demo.pxp 0000664 0000000 0000000 00000200703 14456506420 0020676 0 ustar 00root root 0000000 0000000 4 Misc_Start : P PICT_0 &