pax_global_header00006660000000000000000000000064134327606320014520gustar00rootroot0000000000000052 comment=b089d6231ff982041e797de108138ee85108b930 binoculars-0.0.4/000077500000000000000000000000001343276063200136625ustar00rootroot00000000000000binoculars-0.0.4/.gitignore000066400000000000000000000000131343276063200156440ustar00rootroot00000000000000*.pyc *.swpbinoculars-0.0.4/AUTHORS000066400000000000000000000005721343276063200147360ustar00rootroot00000000000000Written by Willem Onderwaater and Sander Roobol as part of a collaboration between the ID03 beamline at the European Synchrotron Radiation Facility and the Interface Physics group at Leiden University. Contributions, Debian packaging and Maintainance done by Picca Frédéric-Emmanuel of the Coordination et Opération transverses group from the the French SOLEIL Synchrotron. binoculars-0.0.4/README.md000066400000000000000000000037311343276063200151450ustar00rootroot00000000000000BINoculars ========== BINoculars is a tool for data reduction and analysis of large sets of surface diffraction data that have been acquired with a 2D X-ray detector. The intensity of each pixel of a 2D-detector is projected onto a 3-dimensional grid in reciprocal lattice coordinates using a binning algorithm. This allows for fast acquisition and processing of high-resolution datasets and results in a significant reduction of the size of the dataset. The subsequent analysis then proceeds in reciprocal space. It has evolved from the specific needs of the ID03 beamline at the ESRF, but it has a modular design and can be easily adjusted and extended to work with data from other beamlines or from other measurement techniques. This work has been [published](http://dx.doi.org/10.1107/S1600576715009607) with open access in the Journal of Applied Crystallography Volume 48, Part 4 (August 2015) ## Installation Grab the [latest sourcecode as zip](https://github.com/id03/binoculars/archive/master.zip) or clone the Git repository. Run `binoculars`, `binoculars-fitaid`, `binoculars-gui` or `binoculars-processgui` directly from the command line. ## Usage The [BINoculars wiki](https://github.com/id03/binoculars/wiki) contains a detailed tutorial to get started. ## Scripting If you want more complex operations than offered by the command line or GUI tools, you can manipulate BINoculars data directly from Python. Some examples with detailed comments can be found in the [repository](https://github.com/id03/binoculars/tree/master/examples/scripts). The API documentation on the `binoculars` and `binoculars.space` modules can be accessed via pydoc, e.g. run `pydoc -w binoculars binoculars.space` to generate HTML files. ## Extending BINoculars If you want to use BINoculars with your beamline, you need to write a `backend` module. The code contains an [example implementation](https://github.com/id03/binoculars/blob/master/BINoculars/backends/example.py) with many hints and comments. binoculars-0.0.4/binoculars/000077500000000000000000000000001343276063200160235ustar00rootroot00000000000000binoculars-0.0.4/binoculars/__init__.py000066400000000000000000000240511343276063200201360ustar00rootroot00000000000000from __future__ import print_function, with_statement, division import os import sys # for scripted useage def run(args): '''Parameters args: string String as if typed in terminal. The string must consist of the location of the configuration file and the command for specifying the jobs that need to be processed. 
All additonal configuration file overides can be included Returns A tuple of binoculars spaces Examples: >>> space = binoculars.run('config.txt 10') >>> space[0] Axes (3 dimensions, 2848 points, 33.0 kB) { Axis qx (min=-0.01, max=0.0, res=0.01, count=2) Axis qy (min=-0.04, max=-0.01, res=0.01, count=4) Axis qz (min=0.48, max=4.03, res=0.01, count=356) } ''' import binoculars.main binoculars.util.register_python_executable(__file__) main = binoculars.main.Main.from_args(args.split(' ')) if isinstance(main.result, binoculars.space.Multiverse): return main.result.spaces if type(main.result) == bool: filenames = main.dispatcher.config.destination.final_filenames() return tuple(binoculars.space.Space.fromfile(fn) for fn in filenames) def load(filename, key=None): ''' Parameters filename: string Only hdf5 files are acceptable key: a tuple with slices in as much dimensions as the space is Returns A binoculars space Examples: >>> space = binoculars.load('test.hdf5') >>> space Axes (3 dimensions, 2848 points, 33.0 kB) { Axis qx (min=-0.01, max=0.0, res=0.01, count=2) Axis qy (min=-0.04, max=-0.01, res=0.01, count=4) Axis qz (min=0.48, max=4.03, res=0.01, count=356) } ''' import binoculars.space if os.path.exists(filename): return binoculars.space.Space.fromfile(filename, key=key) else: raise IOError("File '{0}' does not exist".format(filename)) def save(filename, space): ''' Save a space to file Parameters filename: string filename to which the data is saved. '.txt', '.hdf5' are supported. space: binoculars space the space containing the data that needs to be saved Examples: >>> space Axes (3 dimensions, 2848 points, 33.0 kB) { Axis qx (min=-0.01, max=0.0, res=0.01, count=2) Axis qy (min=-0.04, max=-0.01, res=0.01, count=4) Axis qz (min=0.48, max=4.03, res=0.01, count=356) } >>> binoculars.save('test.hdf5', space) ''' import binoculars.space import binoculars.util if isinstance(space, binoculars.space.Space): ext = os.path.splitext(filename)[-1] if ext == '.txt': binoculars.util.space_to_txt(space, filename) elif ext == '.edf': binoculars.util.space_to_edf(space, filename) else: space.tofile(filename) else: raise TypeError("'{0!r}' is not a binoculars space".format(space)) def plotspace(space, log=True, clipping=0.0, fit=None, norm=None, colorbar=True, labels=True, **plotopts): ''' plots a space with the correct axes. The space can be either one or two dimensinal. Parameters space: binoculars space the space containing the data that needs to be plotted log: bool axes or colorscale logarithmic clipping: 0 < float < 1 cuts a lowest and highst value on the color scale fit: numpy.ndarray same shape and the space. If one dimensional the fit will be overlayed. 
norm: matplotlib.colors object defining the colorscale colorbar: bool show or not show the colorbar labels: bool show or not show the labels plotopts: keyword arguments keywords that will be accepted by matplotlib.pyplot.plot or matplotlib.pyplot.imshow Examples: >>> space Axes (3 dimensions, 2848 points, 33.0 kB) { Axis qx (min=-0.01, max=0.0, res=0.01, count=2) Axis qy (min=-0.04, max=-0.01, res=0.01, count=4) Axis qz (min=0.48, max=4.03, res=0.01, count=356) } >>> binoculars.plotspace('test.hdf5') ''' import matplotlib.pyplot as pyplot import binoculars.plot import binoculars.space if isinstance(space, binoculars.space.Space): if space.dimension == 3: from mpl_toolkits.mplot3d import Axes3D ax = pyplot.gcf().gca(projection='3d') return binoculars.plot.plot(space, pyplot.gcf(), ax, log=log, clipping=clipping, fit=None, norm=norm, colorbar=colorbar, labels=labels, **plotopts) if fit is not None and space.dimension == 2: ax = pyplot.gcf().add_subplot(121) binoculars.plot.plot(space, pyplot.gcf(), ax, log=log, clipping=clipping, fit=None, norm=norm, colorbar=colorbar, labels=labels, **plotopts) ax = pyplot.gcf().add_subplot(122) return binoculars.plot.plot(space, pyplot.gcf(), ax, log=log, clipping=clipping, fit=fit, norm=norm, colorbar=colorbar, labels=labels, **plotopts) else: return binoculars.plot.plot(space, pyplot.gcf(), pyplot.gca(), log=log, clipping=clipping, fit=fit, norm=norm, colorbar=colorbar, labels=labels, **plotopts) else: raise TypeError("'{0!r}' is not a binoculars space".format(space)) def transform(space, labels, resolutions, exprs): ''' transformation of the coordinates. Parameters space: binoculars space labels: list a list of length N with the labels resolutions: list a list of length N with the resolution per label exprs: list a list of length N with strings containing the expressions that will be evaluated. all numpy funtions can be called without adding 'numpy.' to the functions. Returns A binoculars space of dimension N with labels and resolutions specified in the input Examples: >>> space = binoculars.load('test.hdf5') >>> space Axes (3 dimensions, 2848 points, 33.0 kB) { Axis qx (min=-0.01, max=0.0, res=0.01, count=2) Axis qy (min=-0.04, max=-0.01, res=0.01, count=4) Axis qz (min=0.48, max=4.03, res=0.01, count=356) } >>> newspace = binoculars.transform(space, ['twotheta'], [0.003], ['2 * arcsin(0.51 * (sqrt(qx**2 + qy**2 + qz**2) / (4 * pi)) / (pi * 180))']) >>> newspace Axes (1 dimensions, 152 points, 1.0 kB) { Axis twotheta (min=0.066, max=0.519, res=0.003, count=152) } ''' import binoculars.util import binoculars.space if isinstance(space, binoculars.space.Space): transformation = binoculars.util.transformation_from_expressions(space, exprs) newspace = space.transform_coordinates(resolutions, labels, transformation) else: raise TypeError("'{0!r}' is not a binoculars space".format(space)) return newspace def fitspace(space, function, guess=None): ''' fit the space data. Parameters space: binoculars space function: list a string with the name of the desired function. supported are: lorentzian (automatically selects 1d or 2d), gaussian1d and voigt1d guess: list a list of length N with the resolution per label Returns A binoculars fit object. 
Examples: >>> fit = binoculars.fitspace(space, 'lorentzian') >>> print(fit.summary) I: 1.081e-07 +/- inf loc: 0.3703 +/- inf gamma: 0.02383 +/- inf slope: 0.004559 +/- inf offset: -0.001888 +/- inf >>> parameters = fit.parameters >>> data = fit.fitdata >>> binoculars.plotspace(space, fit = data) ''' import binoculars.fit if isinstance(space, binoculars.space.Space): fitclass = binoculars.fit.get_class_by_name(function) return fitclass(space, guess) else: raise TypeError("'{0!r}' is not a binoculars space".format(space)) return newspace def info(filename): ''' Explore the file without loading the file, or after loading the file Parameters filename: filename or space Examples: >>> print binoculars.info('test.hdf5') Axes (3 dimensions, 46466628 points, 531.0 MB) { Axis H (min=-0.1184, max=0.0632, res=0.0008, count=228) Axis K (min=-1.1184, max=-0.9136, res=0.0008, count=257) Axis L (min=0.125, max=4.085, res=0.005, count=793) } ConfigFile{ [dispatcher] [projection] [input] } origin = test.hdf5 >>> space = binoculars.load('test.hdf5') >>> print binoculars.info(space) Axes (3 dimensions, 46466628 points, 531.0 MB) { Axis H (min=-0.1184, max=0.0632, res=0.0008, count=228) Axis K (min=-1.1184, max=-0.9136, res=0.0008, count=257) Axis L (min=0.125, max=4.085, res=0.005, count=793) } ConfigFile{ [dispatcher] [projection] [input] } origin = test.hdf5 ''' import binoculars.space ret = '' if isinstance(filename, binoculars.space.Space): ret += '{!r}\n{!r}'.format(filename, filename.config) elif type(filename) == str: if os.path.exists(filename): try: axes = binoculars.space.Axes.fromfile(filename) except Exception as e: raise IOError('{0}: unable to load Space: {1!r}'.format(filename, e)) ret += '{!r}\n'.format(axes) try: config = binoculars.util.ConfigFile.fromfile(filename) except Exception as e: raise IOError('{0}: unable to load util.ConfigFile: {1!r}'.format(filename, e)) ret += '{!r}'.format(config) else: raise IOError("File '{0}' does not exist".format(filename)) return ret binoculars-0.0.4/binoculars/backend.py000066400000000000000000000111721343276063200177660ustar00rootroot00000000000000from . import util, errors, dispatcher class ProjectionBase(util.ConfigurableObject): def parse_config(self, config): super(ProjectionBase, self).parse_config(config) res = config.pop('resolution') # or just give 1 number for all dimensions self.config.limits = util.parse_pairs(config.pop('limits', None)) # Optional, set the limits of the space object in projected coordinates. Syntax is same as numpy e.g. 'limits = [:0,-1:,:], [0:,:-1,:], [:0,:-1,:], [0:,-1:,:]' labels = self.get_axis_labels() if not self.config.limits is None: for lim in self.config.limits: if len(lim) != len(labels): raise errors.ConfigError('dimension mismatch between projection axes ({0}) and limits specification ({1}) in {2}'.format(labels, self.config.limits, self.__class__.__name__)) if ',' in res: self.config.resolution = util.parse_tuple(res, type=float) if not len(labels) == len(self.config.resolution): raise errors.ConfigError('dimension mismatch between projection axes ({0}) and resolution specification ({1}) in {2}'.format(labels, self.config.resolution, self.__class__.__name__)) else: self.config.resolution = tuple([float(res)] * len(labels)) def project(self, *args): raise NotImplementedError def get_axis_labels(self): raise NotImplementedError class Job(object): weight = 1. 
# estimate of job difficulty (arbitrary units) def __init__(self, **kwargs): self.__dict__.update(kwargs) class InputBase(util.ConfigurableObject): """Generate and process Job()s. Note: there is no guarantee that generate_jobs() and process_jobs() will be called on the same instance, not even in the same process or on the same computer!""" def parse_config(self, config): super(InputBase, self).parse_config(config) self.config.target_weight = int(config.pop('target_weight', 1000)) # # approximate number of images per job, only useful when running on the oar cluster def generate_jobs(self, command): """Receives command from user, yields Job() instances""" raise NotImplementedError def process_job(self, job): """Receives a Job() instance, yields (intensity, args_to_be_sent_to_a_Projection_instance) Job()s could have been pickle'd and distributed over a cluster""" self.metadata = util.MetaBase('job', job.__dict__) def get_destination_options(self, command): """Receives the same command as generate_jobs(), but returns dictionary that will be used to .format() the dispatcher:destination configuration value.""" return {} def get_dispatcher(config, main, default=None): return _get_backend(config, 'dispatcher', dispatcher.DispatcherBase, default=default, args=[main]) def get_input(config, default=None): return _get_backend(config, 'input', InputBase, default=default) def get_projection(config, default=None): return _get_backend(config, 'projection', ProjectionBase, default=default) def _get_backend(config, section, basecls, default=None, args=[], kwargs={}): if isinstance(config, util.ConfigSection): return config.class_(config, *args, **kwargs) type = config.pop('type', default) if type is None: raise errors.ConfigError("required option 'type' not given in section '{0}'".format(section)) type = type.strip() if ':' in type: try: modname, clsname = type.split(':') except ValueError: raise errors.ConfigError("invalid type '{0}' in section '{1}'".format(type, section)) try: backend = __import__('backends.{0}'.format(modname), globals(), locals(), [], 1) except ImportError as e: raise errors.ConfigError("unable to import module backends.{0}: {1}".format(modname, e)) module = getattr(backend, modname) elif section == 'dispatcher': module = dispatcher clsname = type else: raise errors.ConfigError("invalid type '{0}' in section '{1}'".format(type, section)) clsname = clsname.lower() names = dict((name.lower(), name) for name in dir(module)) if clsname in names: cls = getattr(module, names[clsname]) if issubclass(cls, basecls): return cls(config, *args, **kwargs) else: raise errors.ConfigError("type '{0}' not compatible in section '{1}': expected class derived from '{2}', got '{3}'".format(type, section, basecls.__name__, cls.__name__)) else: raise errors.ConfigError("invalid type '{0}' in section '{1}'".format(type, section)) binoculars-0.0.4/binoculars/backends/000077500000000000000000000000001343276063200175755ustar00rootroot00000000000000binoculars-0.0.4/binoculars/backends/__init__.py000066400000000000000000000000001343276063200216740ustar00rootroot00000000000000binoculars-0.0.4/binoculars/backends/bm25.py000066400000000000000000000220351343276063200207160ustar00rootroot00000000000000""" BINocular backend for beamline BM25, branch B first endstation [1] This backend should serve as a basic implementation of a backend based on xrayutilities [2]. 
It uses the information from the edf files (motors position and detector image) ignoring the spec file, except for using its scan numbers to identify images belonging to the same scan. You should use CCD file names generated with the following pattern: filename_#n_#p_#r.edf (n: spec-scan number, p: point number, r: image number) Binning (2,2) The backend is called 'EH2SCD'. Created on 2014-10-28 [1] http://www.esrf.eu/UsersAndScience/Experiments/CRG/BM25/BeamLine/experimentalstations/Single_Crystal_Diffraction [2] http://xrayutilities.sourceforge.net/ author: Dominik Kriegner (dominik.kriegner@gmail.com) """ import sys import os import glob import numpy import xrayutilities as xu from .. import backend, errors, util class HKLProjection(backend.ProjectionBase): # scalars: mu, theta, phi, chi, ccdty, ccdtx, ccdtz, ccdth, wavelength # 3x3 matrix: UB def project(self, mu, theta, phi, chi, ccdty, ccdtx, ccdtz, ccdth, ccdtr, wavelength, UB, qconv): qconv.wavelength = wavelength h, k, l = qconv.area(mu, theta, phi, chi, ccdty, ccdtx, ccdtz, ccdth, UB=UB.reshape((3, 3))) return (h, k, l) def get_axis_labels(self): return 'H', 'K', 'L' class HKProjection(HKLProjection): def project(self, mu, theta, phi, chi, ccdty, ccdtx, ccdtz, ccdth, ccdtr, wavelength, UB, qconv): H, K, L = super(HKProjection, self).project(mu, theta, phi, chi, ccdty, ccdtx, ccdtz, ccdth, ccdtr, wavelength, UB, qconv) return (H, K) def get_axis_labels(self): return 'H', 'K' class QProjection(backend.ProjectionBase): def project(self, mu, theta, phi, chi, ccdty, ccdtx, ccdtz, ccdth, ccdtr, wavelength, UB, qconv): qconv.wavelength = wavelength qx, qy, qz = qconv.area(mu, theta, phi, chi, ccdty, ccdtx, ccdtz, ccdth, ccdtr, UB=numpy.identity(3)) return (qx, qy, qz) def get_axis_labels(self): return 'qx', 'qy', 'qz' class QinpProjection(backend.ProjectionBase): def project(self, mu, theta, phi, chi, ccdty, ccdtx, ccdtz, ccdth, ccdtr, wavelength, UB, qconv): qconv.wavelength = wavelength qx, qy, qz = qconv.area(mu, theta, phi, chi, ccdty, ccdtx, ccdtz, ccdth, ccdtr, UB=numpy.identity(3)) return (numpy.sqrt(qx**2+qy**2), qz) def get_axis_labels(self): return 'qinp', 'qz' class EDFInput(backend.InputBase): # OFFICIAL API def generate_jobs(self, command): scans = util.parse_multi_range(','.join(command).replace(' ', ',')) imgs = self.list_images(scans) imgcount = len(imgs) if not len(imgs): sys.stderr.write('error: no images selected, nothing to do\n') #next(self.get_images(imgs, 0, imgcount-1, dry_run=True))# dryrun for s in util.chunk_slicer(imgcount, self.config.target_weight): yield backend.Job(images=imgs, firstimage=s.start, lastimage=s.stop-1, weight=s.stop-s.start) def process_job(self, job): super(EDFInput, self).process_job(job) images = self.get_images(job.images, job.firstimage, job.lastimage) # iterator! 
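# note: process_job is itself a generator; each image is processed and its
# (intensity, weights, coordinates) tuple is yielded one at a time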
for image in images: yield self.process_image(image) def parse_config(self, config): super(EDFInput, self).parse_config(config) self.config.xmask = util.parse_multi_range(config.pop('xmask')) self.config.ymask = util.parse_multi_range(config.pop('ymask')) self.config.imagefile = config.pop('imagefile') self.config.UB = config.pop('ub', None) if self.config.UB: self.config.UB = util.parse_tuple(self.config.UB, length=9, type=float) self.config.sddx = float(config.pop('sddx_offset')) self.config.sddy = float(config.pop('sddy_offset')) self.config.sddz = float(config.pop('sddz_offset')) self.config.ccdth0 = float(config.pop('ccdth_offset')) self.config.pixelsize = util.parse_tuple(config.pop('pixelsize'), length=2, type=float) self.config.centralpixel = util.parse_tuple(config.pop('centralpixel'), length=2, type=float) def get_destination_options(self, command): if not command: return False command = ','.join(command).replace(' ', ',') scans = util.parse_multi_range(command) return dict(first=min(scans), last=max(scans), range=','.join(command)) # CONVENIENCE FUNCTIONS @staticmethod def apply_mask(data, xmask, ymask): roi = data[ymask, :] return roi[:, xmask] # MAIN LOGIC def list_images(self, scannrs): pattern = self.config.imagefile imgfiles = [] # check if necessary image-files exist for nr in scannrs: try: fpattern = pattern.format(scannr=nr) except Exception as e: raise errors.ConfigError("invalid 'imagefile' specification '{0}': {1}".format(self.config.imagefile, e)) files = glob.glob(fpattern) if len(files)==0: raise errors.FileError("needed file do not exist: scannr {0}".format(nr)) else: imgfiles += files return imgfiles def get_images(self, imgs, first, last, dry_run=False): for i in range(first,last+1): img = imgs[i] if dry_run: yield else: edf = xu.io.EDFFile(img) yield edf class EH2SCD(EDFInput): monitor_counter = 'C_mont' # define BM25 goniometer, SIXC geometry? with 2D detector mounted on # translation-axes # see http://www.esrf.eu/UsersAndScience/Experiments/CRG/BM25/BeamLine/experimentalstations/Single_Crystal_Diffraction # The geometry is: 4S + translations and one det. rotation # sample axis: mu, th, chi, phi # detector axis: translations + theta rotation (to make beam perpendicular # to the detector plane in symmetric arrangement) qconv = xu.experiment.QConversion(['x+', 'z+', 'y+', 'x+'], ['ty', 'tx', 'tz', 'x+', 'ty'], [0, 1, 0]) # convention for coordinate system: y downstream; x in bound; z upwards # (righthanded) # QConversion will set up the goniometer geometry. So the first argument # describes the sample rotations, the second the detector rotations and the # third the primary beam direction. def parse_config(self, config): super(EH2SCD, self).parse_config(config) centralpixel = self.config.centralpixel # define detector parameters roi = (self.config.ymask[0], self.config.ymask[-1]+1, self.config.xmask[0], self.config.xmask[-1]+1) self.qconv.init_area('z-', 'x+', cch1=centralpixel[1], cch2=centralpixel[0], Nch1=1912, Nch2=3825, pwidth1=self.config.pixelsize[1], pwidth2=self.config.pixelsize[0], distance=1e-10, roi=roi) print(('{:>20} {:>9} {:>10} {:>9} {:>9} {:>9}'.format(' ', 'Mu', 'Theta', 'CCD_Y', 'CCD_X', 'CCD_Z'))) def process_image(self, image): # motor positions mu = float(image.header['M_mu']) th = float(image.header['M_th']) chi = float(image.header['M_chi']) phi = float(image.header['M_phi']) # distance 'ctr' corresponds to distance of the detector chip from # the CCD_TH rotation axis. 
The rest is handled by the translations ctr = -270.0 # measured by ruler only!!! cty = float(image.header['M_CCD_Y'])-self.config.sddy-ctr ctx = float(image.header['M_CCD_X'])-self.config.sddx ctz = float(image.header['M_CCD_Z'])-self.config.sddz cth = float(image.header['M_CCD_TH'])-self.config.ccdth0 # filter correction transm = 1. # no filter correction! (Filters are manual on BM25!) mon = float(image.header[self.monitor_counter]) wavelength = float(image.header['WAVELENGTH']) if self.config.UB: UB = self.config.UB else: UB = self._get_UB(image.header) # normalization data = image.data / mon / transm print(('{:>20} {:9.4f} {:10.4f} {:9.1f} {:9.1f} {:9.1f}'.format(os.path.split(image.filename)[-1] ,mu, th, cty, ctx, ctz))) # masking intensity = self.apply_mask(data, self.config.xmask, self.config.ymask) return intensity, numpy.ones_like(intensity), (mu, th, phi, chi, cty, ctx, ctz, cth, ctr,## weights added to API. Treated here like before wavelength, UB, self.qconv) @staticmethod def _get_UB(header): ub = numpy.zeros(9) for i in range(9): ub[i] = float(header['UB{:d}'.format(i)]) return ub binoculars-0.0.4/binoculars/backends/bm32.py000066400000000000000000000423721343276063200207220ustar00rootroot00000000000000import sys import os import glob import numpy import time #python3 support PY3 = sys.version_info > (3,) if PY3: pass else: from itertools import izip as zip try: from PyMca import specfilewrapper, EdfFile, SixCircle, specfile except ImportError: from PyMca5.PyMca import specfilewrapper, EdfFile, SixCircle, specfile from .. import backend, errors, util class pixels(backend.ProjectionBase): def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): y,x = numpy.mgrid[slice(None,gamma.shape[0]), slice(None,delta.shape[0])] return (y, x) def get_axis_labels(self): return 'y','x' class HKLProjection(backend.ProjectionBase): # arrays: gamma, delta # scalars: theta, mu, chi, phi def project(self, wavelength, UB, beta, delta, omega, alfa, chi, phi): R = SixCircle.getHKL(wavelength, UB, gamma=beta, delta=delta, theta=omega, mu=alfa, chi=chi, phi=phi) shape = beta.size, delta.size H = R[0,:].reshape(shape) K = R[1,:].reshape(shape) L = R[2,:].reshape(shape) return (H, K, L) def get_axis_labels(self): return 'H', 'K', 'L' class HKProjection(HKLProjection): def project(self, wavelength, UB, beta, delta, omega, alfa, chi, phi): H, K, L = super(HKProjection, self).project( wavelength, UB, beta, delta, omega, alfa, chi, phi) return (H, K) def get_axis_labels(self): return 'H', 'K' class ThetaLProjection(backend.ProjectionBase): # arrays: gamma, delta # scalars: theta, mu, chi, phi def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): R = SixCircle.getHKL(wavelength, UB, gamma=gamma, delta=delta, theta=theta, mu=mu, chi=chi, phi=phi) shape = gamma.size, delta.size L = R[2,:].reshape(shape) theta_array = numpy.ones_like(L) * theta return (theta_array,L) def get_axis_labels(self): return 'Theta', 'L' class QProjection(backend.ProjectionBase): def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): shape = gamma.size, delta.size sixc = SixCircle.SixCircle() sixc.setLambda(wavelength) sixc.setUB(UB) R = sixc.getQSurface(gamma=gamma, delta=delta, theta=theta, mu=mu, chi=chi, phi=phi) qz = R[0,:].reshape(shape) qy = R[1,:].reshape(shape) qx = R[2,:].reshape(shape) return (qz, qy, qx) def get_axis_labels(self): return 'qx', 'qy', 'qz' class SphericalQProjection(QProjection): def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): qz, qy, qx = 
super(SphericalQProjection, self).project(wavelength, UB, gamma, delta, theta, mu, chi, phi) q = numpy.sqrt(qx**2 + qy**2 + qz**2) theta = numpy.arccos(qz / q) phi = numpy.arctan2(qy, qx) return (q, theta, phi) def get_axis_labels(self): return 'Q', 'Theta', 'Phi' class CylindricalQProjection(QProjection): def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): qz, qy, qx = super(CylindricalQProjection, self).project(wavelength, UB, gamma, delta, theta, mu, chi, phi) qpar = numpy.sqrt(qx**2 + qy**2) phi = numpy.arctan2(qy, qx) return (qpar, qz, phi) def get_axis_labels(self): return 'qpar', 'qz', 'Phi' class nrQProjection(backend.ProjectionBase): def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): k0 = 2 * numpy.pi / wavelength delta, gamma = numpy.meshgrid(delta, gamma) mu *= numpy.pi/180 delta *= numpy.pi/180 gamma *= numpy.pi/180 qy = k0 * (numpy.cos(gamma) * numpy.cos(delta) - numpy.cos(mu)) ## definition of qx, and qy same as spec at theta = 0 qx = k0 * (numpy.cos(gamma) * numpy.sin(delta)) qz = k0 * (numpy.sin(gamma) + numpy.sin(mu)) return (qx, qy, qz) def get_axis_labels(self): return 'qx', 'qy', 'qz' class TwoThetaProjection(SphericalQProjection): def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): q, theta, phi = super(TwoThetaProjection, self).project(wavelength, UB, gamma, delta, theta, mu, chi, phi) return 2 * numpy.arcsin(q * wavelength / (4 * numpy.pi)) / numpy.pi * 180, # note: we need to return a 1-tuple? def get_axis_labels(self): return 'TwoTheta' class Qpp(nrQProjection): def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): qx, qy, qz = super(Qpp, self).project(wavelength, UB, gamma, delta, theta, mu, chi, phi) qpar = numpy.sqrt(qx**2 + qy**2) qpar[numpy.sign(qx) == -1] *= -1 return (qpar, qz) def get_axis_labels(self): return 'Qpar', 'Qz' class GammaDeltaTheta(HKLProjection):#just passing on the coordinates, makes it easy to accurately test the theta correction def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): delta,gamma = numpy.meshgrid(delta,gamma) theta = theta * numpy.ones_like(delta) return (gamma, delta, theta) def get_axis_labels(self): return 'Gamma','Delta','Theta' class GammaDelta(HKLProjection):#just passing on the coordinates, makes it easy to accurately test the theta correction def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): delta,gamma = numpy.meshgrid(delta,gamma) return (gamma, delta) def get_axis_labels(self): return 'Gamma','Delta' class GammaDeltaMu(HKLProjection):#just passing on the coordinates, makes it easy to accurately test the theta correction def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): delta,gamma = numpy.meshgrid(delta,gamma) mu = mu * numpy.ones_like(delta) return (gamma, delta, mu) def get_axis_labels(self): return 'Gamma','Delta','Mu' class BM32Input(backend.InputBase): # OFFICIAL API dbg_scanno = None dbg_pointno = None def generate_jobs(self, command): scans = util.parse_multi_range(','.join(command).replace(' ', ',')) if not len(scans): sys.stderr.write('error: no scans selected, nothing to do\n') for scanno in scans: util.status('processing scan {0}...'.format(scanno)) scan = self.get_scan(scanno) if self.config.pr: pointcount = self.config.pr[1] - self.config.pr[0] + 1 start = self.config.pr[0] else: start = 0 try: pointcount = scan.lines() except specfile.error: # no points continue next(self.get_images(scan, 0, pointcount-1, dry_run=True))# dryrun if pointcount > self.config.target_weight * 1.4: 
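# scans longer than ~1.4x the configured target_weight are split into chunks of
# roughly target_weight points each, so they can be dispatched as separate jobs;
# shorter scans are emitted as a single job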
for s in util.chunk_slicer(pointcount, self.config.target_weight): yield backend.Job(scan=scanno, firstpoint=start+s.start, lastpoint=start+s.stop-1, weight=s.stop-s.start) else: yield backend.Job(scan=scanno, firstpoint=start, lastpoint=start+pointcount-1, weight=pointcount) def process_job(self, job): super(BM32Input, self).process_job(job) scan = self.get_scan(job.scan) self.metadict = dict() try: scanparams = self.get_scan_params(scan) # wavelength, UB pointparams = self.get_point_params(scan, job.firstpoint, job.lastpoint) # 2D array of diffractometer angles + mon + transm images = self.get_images(scan, job.firstpoint, job.lastpoint) # iterator! for pp, image in zip(pointparams, images): yield self.process_image(scanparams, pp, image) util.statuseol() except Exception as exc: #exc.args = errors.addmessage(exc.args, ', An error occured for scan {0} at point {1}. See above for more information'.format(self.dbg_scanno, self.dbg_pointno)) raise self.metadata.add_section('id03_backend', self.metadict) def parse_config(self, config): super(BM32Input, self).parse_config(config) self.config.xmask = util.parse_multi_range(config.pop('xmask', None))#Optional, select a subset of the image range in the x direction. all by default self.config.ymask = util.parse_multi_range(config.pop('ymask', None))#Optional, select a subset of the image range in the y direction. all by default self.config.specfile = config.pop('specfile')#Location of the specfile self.config.imagefolder = config.pop('imagefolder', None) #Optional, takes specfile folder tag by default self.config.pr = util.parse_tuple(config.pop('pr', None), length=2, type=int) #Optional, all range by default self.config.background = config.pop('background', None) #Optional, if supplied a space of this image is constructed if self.config.xmask is None: self.config.xmask = slice(None) if self.config.ymask is None: self.config.ymask = slice(None) self.config.maskmatrix = load_matrix(config.pop('maskmatrix', None)) #Optional, if supplied pixels where the mask is 0 will be removed self.config.sdd = config.pop('sdd', None)# sample to detector distance (mm) if self.config.sdd is not None: self.config.sdd = float(self.config.sdd) self.config.pixelsize = util.parse_tuple(config.pop('pixelsize', None), length=2, type=float)# pixel size x/y (mm) (same dimension as sdd) def get_destination_options(self, command): if not command: return False command = ','.join(command).replace(' ', ',') scans = util.parse_multi_range(command) return dict(first=min(scans), last=max(scans), range=','.join(str(scan) for scan in scans)) # CONVENIENCE FUNCTIONS _spec = None def get_scan(self, scannumber): if self._spec is None: self._spec = specfilewrapper.Specfile(self.config.specfile) return self._spec.select('{0}.1'.format(scannumber)) def find_edfs(self, pattern): files = glob.glob(pattern) ret = {} for file in files: try: filename = os.path.basename(file).split('.')[0] imno = int(filename.split('_')[-1].split('-')[-1]) ret[imno] = file except ValueError: continue return ret @staticmethod def apply_mask(data, xmask, ymask): roi = data[ymask, :] return roi[:, xmask] # MAIN LOGIC def get_scan_params(self, scan): self.dbg_scanno = scan.number() UB = numpy.array(scan.header('G')[2].split(' ')[-9:],dtype=numpy.float) wavelength = float(scan.header('G')[1].split(' ')[-1]) self.metadict['UB'] = UB self.metadict['wavelength'] = wavelength return wavelength, UB def get_images(self, scan, first, last, dry_run=False): imagenos = numpy.array(scan.datacol('img')[slice(first, last + 1)], 
dtype = numpy.int) + 1##error in spec?! if self.config.background: if not os.path.exists(self.config.background): raise errors.FileError('could not find background file {0}'.format(self.config.background)) if dry_run: yield else: edf = EdfFile.EdfFile(self.config.background) for i in range(first, last+1): self.dbg_pointno = i yield edf else: try: uccdtagline = scan.header('M')[0].split()[-1] UCCD = os.path.dirname(uccdtagline).split(os.sep) except: print('warning: UCCD tag not found, use imagefolder for proper file specification') UCCD = [] pattern = self._get_pattern(UCCD) matches = self.find_edfs(pattern) if not set(imagenos).issubset(set(matches.keys())): raise errors.FileError("incorrect number of matches for scan {0} using pattern {1}".format(scan.number(), pattern)) if dry_run: yield else: for i in imagenos: self.dbg_pointno = i edf = EdfFile.EdfFile(matches[i]) yield edf def _get_pattern(self,UCCD): imagefolder = self.config.imagefolder if imagefolder: try: imagefolder = imagefolder.format(UCCD=UCCD, rUCCD=list(reversed(UCCD))) except Exception as e: raise errors.ConfigError("invalid 'imagefolder' specification '{0}': {1}".format(self.config.imagefolder, e)) else: if not os.path.exists(imagefolder): raise errors.ConfigError("invalid 'imagefolder' specification '{0}'. Path {1} does not exist".format(self.config.imagefolder, imagefolder)) else: imagefolder = os.path.join(*UCCD) if not os.path.exists(imagefolder): raise errors.ConfigError("invalid UCCD tag '{0}'. The UCCD tag in the specfile does not point to an existing folder. Specify the imagefolder in the configuration file.".format(imagefolder)) return os.path.join(imagefolder, '*') class EH1(BM32Input): def parse_config(self, config): super(EH1, self).parse_config(config) self.config.centralpixel = util.parse_tuple(config.pop('centralpixel', None), length=2, type=int) self.config.UB = util.parse_tuple(config.pop('ub', None), length=9, type=float) def process_image(self, scanparams, pointparams, edf): delta, omega, alfa, beta, chi, phi, mon, transm = pointparams wavelength, UB = scanparams image = edf.GetData(0) header = edf.GetHeader(0) weights = numpy.ones_like(image) if not self.config.centralpixel: self.config.centralpixel = (int(header['y_beam']), int(header['x_beam'])) if not self.config.sdd: self.config.sdd = float(header['det_sample_dist']) if self.config.background: data = image / mon else: data = image / mon / transm if mon == 0: raise errors.BackendError('Monitor is zero, this results in empty output. Scannumber = {0}, pointnumber = {1}. 
Did you forget to open the shutter?'.format(self.dbg_scanno, self.dbg_pointno)) util.status('{4}| beta: {0:.3f}, delta: {1:.3f}, omega: {2:.3f}, alfa: {3:.3f}'.format(beta, delta, omega, alfa, time.ctime(time.time()))) # pixels to angles pixelsize = numpy.array(self.config.pixelsize) sdd = self.config.sdd app = numpy.arctan(pixelsize / sdd) * 180 / numpy.pi centralpixel = self.config.centralpixel # (column, row) = (delta, gamma) beta_range= -app[1] * (numpy.arange(data.shape[1]) - centralpixel[1]) + beta delta_range= app[0] * (numpy.arange(data.shape[0]) - centralpixel[0]) + delta # masking if self.config.maskmatrix is not None: if self.config.maskmatrix.shape != data.shape: raise errors.BackendError('The mask matrix does not have the same shape as the images') weights *= self.config.maskmatrix delta_range = delta_range[self.config.ymask] beta_range = beta_range[self.config.xmask] weights = self.apply_mask(weights, self.config.xmask, self.config.ymask) intensity = self.apply_mask(data, self.config.xmask, self.config.ymask) intensity = numpy.rot90(intensity) intensity = numpy.fliplr(intensity) intensity = numpy.flipud(intensity) weights = numpy.rot90(weights) weights = numpy.fliplr(weights) weights = numpy.flipud(weights) #polarisation correction delta_grid, beta_grid = numpy.meshgrid(delta_range, beta_range) Pver = 1 - numpy.sin(delta_grid * numpy.pi / 180.)**2 * numpy.cos(beta_grid * numpy.pi / 180.)**2 #intensity /= Pver return intensity, weights, (wavelength, UB, beta_range, delta_range, omega, alfa, chi, phi) def get_point_params(self, scan, first, last): sl = slice(first, last+1) DEL, OME, ALF, BET, CHI, PHI, MON, TRANSM = list(range(8)) params = numpy.zeros((last - first + 1, 8)) # gamma delta theta chi phi mu mon transm params[:, CHI] = 0 #scan.motorpos('CHI') params[:, PHI] = 0 #scan.motorpos('PHI') params[:, OME] = scan.datacol('omecnt')[sl] params[:, BET] = scan.datacol('betcnt')[sl] params[:, DEL] = scan.datacol('delcnt')[sl] params[:, MON] = scan.datacol('Monitor')[sl] #params[:, TRANSM] = scan.datacol('transm')[sl] params[:, TRANSM] = 1 params[:, ALF] = scan.datacol('alfcnt')[sl] return params def load_matrix(filename): if filename == None: return None if os.path.exists(filename): ext = os.path.splitext(filename)[-1] if ext == '.txt': return numpy.array(numpy.loadtxt(filename), dtype = numpy.bool) elif ext == '.npy': return numpy.array(numpy.load(filename), dtype = numpy.bool) elif ext == '.edf': return numpy.array(EdfFile.EdfFile(filename).getData(0),dtype = numpy.bool) else: raise ValueError('unknown extension {0}, unable to load matrix!\n'.format(ext)) else: raise IOError('filename: {0} does not exist. Can not load matrix'.format(filename)) binoculars-0.0.4/binoculars/backends/example.py000066400000000000000000000160241343276063200216050ustar00rootroot00000000000000import sys import os import itertools import numpy from .. import backend, errors, util ''' This example backend contains the minimal set of functions needed to construct a backend. It consists of a child of a backend.InputBase class and a child of a backend.ProjectionBase class. The backend.Inputbase is collects the data from the measurement. The backend.ProjectionBase class calculates the new coordinates per pixel. You can write as much input classes and as much projections in one backend as you prefer, provided that the output of the inputclass is compatible with projection class. Otherwise you will be served best by writing a new backend, for the incompatibility will create errors that break the script. 
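To give a feel for how these pieces fit together, a configuration file for this
example backend could look roughly like the sketch below. The option names are
taken from the parse_config methods in this module and from the projection base
class; the values are placeholders and the dispatcher section is only an
assumption, so treat this as an illustration rather than a definitive template:

    [dispatcher]
    destination = demo_{first}-{last}.hdf5

    [input]
    type = example:input
    sdd = 636
    pixelsize = 0.055, 0.055
    centralpixel = 50, 50
    wavelength = 0.72

    [projection]
    type = example:qprojection
    resolution = 0.01, 0.01, 0.01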
In the configuration file you specify the inputclass and projection needed for the treatment of the dataset. ''' class QProjection(backend.ProjectionBase): def project(self, wavelength, af, delta, omega, ai): ''' This class takes as input the tuple of coordinates returned by the process_job method in the backend.InputBase class. Here you specify how to project the coordinates that belong to every datapoint. The number of input arguments should match the second tuple returned by process_job. The shape of each returned array should match the shape of the first argument returned by process_job ''' k0 = 2 * numpy.pi / wavelength qy = k0 * (numpy.cos(af) * numpy.cos(delta) - numpy.cos(ai) * numpy.cos(omega)) qx = k0 * (numpy.cos(af) * numpy.sin(delta) - numpy.cos(ai) * numpy.sin(omega)) qz = k0 * (numpy.sin(af) + numpy.sin(ai)) return (qx.flatten(), qy.flatten(), qz.flatten()) # a tuple of numpy.arrays with the same dimension as the number of labels def get_axis_labels(self): ''' Specify the names of the axes. The number of labels should be equal to the number of arrays returned in the project method. ''' return 'qx', 'qy', 'qz' class Input(backend.InputBase): def generate_jobs(self, command): ''' Command is supplied when the program is started in the terminal. This can used to differentiate between separate datasets that will be processed independently. ''' scans = util.parse_multi_range(','.join(command).replace(' ', ','))# parse the command for scanno in scans: yield backend.Job(scan=scanno) def process_job(self, job): ''' This methods is a generator that returns the intensity, the weights and a tuple of coordinates that will be used for projection. The input is a backend.job object. This objects contains attributes that are supplied as keyword arguments in the generate_jobs method when backend.Job is instantiated. You can wet here the weights according the behaviour of your detector. To select normal averaging give the weights the value of ones. This array should be the same shape as the intensity array. This example backend simulates a random path through angular space starting at the origin. an example image will be generated using a three dimensional 10-slit interference function. The angles are with respect to the sample where af and delta are the angular coordinates of the pixels and ai and omega are the in plane and out of plane angles of the incoming beam. ''' super(Input, self).process_job(job)# call super to fix metadeta handling scan = job.scan #reflects a scan with 100 datapoints aaf = numpy.linspace(0, numpy.random.random() * 20, 100) adelta = numpy.linspace(0, numpy.random.random() * 20, 100) aai = numpy.linspace(0, numpy.random.random() * 20, 100) aomega = numpy.linspace(0, numpy.random.random() * 20, 100) for af, delta, ai, omega in zip(aaf, adelta, aai, aomega): print('af: {0}, delta: {1}, ai: {2}, omega: {3}'.format(af, delta, ai, omega)) # caculating the angles per pixel. The values specified in the configuration file # can be used for calculating these values pixelsize = numpy.array(self.config.pixelsize) sdd = self.config.sdd app = numpy.arctan(pixelsize / sdd) * 180 / numpy.pi # create an image of 100 x 100 pixels and calculate the coordinates corresponding to every pixel centralpixel = self.config.centralpixel # (column, row) = (delta, af) af_range= -app[1] * (numpy.arange(100) - centralpixel[1]) + af delta_range= app[0] * (numpy.arange(100) - centralpixel[0]) + delta #calculating the coordinates for simulating the image. 
This is only included #in this example for simulating of the images. It has no other use. k0 = 2 * numpy.pi / self.config.wavelength delta, af = numpy.meshgrid(delta_range, af_range) ai *= numpy.pi/180 delta *= numpy.pi/180 af *= numpy.pi/180 omega *= numpy.pi/180 qy = k0 * (numpy.cos(af) * numpy.cos(delta) - numpy.cos(ai) * numpy.cos(omega)) qx = k0 * (numpy.cos(af) * numpy.sin(delta) - numpy.cos(ai) * numpy.sin(omega)) qz = k0 * (numpy.sin(af) + numpy.sin(ai)) #simulating the image data = numpy.abs(numpy.sin(qx * 10) / numpy.sin(qx) * numpy.sin(qy * 10) / numpy.sin(qy) * numpy.sin(qz * 10) / numpy.sin(qz))**2 weights = numpy.ones_like(data) yield data, weights, (self.config.wavelength, af, delta, omega, ai) def parse_config(self, config): ''' To collect and process data you need the values provided in the configuration file. These you can access locally through the provided config object. This is a dict with as keys the labels given in the configfile. To use them outside the parse_config method you attribute them to the self.config object which can be used throughout the input class. A warning will be generated afterwards for config values not popped out of the dict. ''' super(Input, self).parse_config(config) self.config.sdd = float(config.pop('sdd')) self.config.pixelsize = util.parse_tuple(config.pop('pixelsize'), length=2, type=float) self.config.centralpixel = util.parse_tuple(config.pop('centralpixel'), length=2, type=int) self.config.wavelength = float(config.pop('wavelength')) def get_destination_options(self, command): ''' Creates the arguments that you can use to construct an output filename. This method returns a dict object with keys that will can be used in the configfile. In the configfile the output filename can now be described as 'destination = demo_{first}-{last}.hdf5'. This helps to organise the output automatically. ''' if not command: return False command = ','.join(command).replace(' ', ',') scans = util.parse_multi_range(command) return dict(first=min(scans), last=max(scans), range=','.join(command)) binoculars-0.0.4/binoculars/backends/id03.py000066400000000000000000001151701343276063200207130ustar00rootroot00000000000000import sys import os import itertools import glob import numpy import time #python3 support PY3 = sys.version_info > (3,) if PY3: pass else: from itertools import izip as zip try: from PyMca import specfilewrapper, EdfFile, SixCircle, specfile except ImportError: from PyMca5.PyMca import specfilewrapper, EdfFile, SixCircle, specfile from .. 
import backend, errors, util class pixels(backend.ProjectionBase): def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): y, x = numpy.mgrid[slice(None, gamma.shape[0]), slice(None, delta.shape[0])] return (y, x) def get_axis_labels(self): return 'y', 'x' class HKLProjection(backend.ProjectionBase): # arrays: gamma, delta # scalars: theta, mu, chi, phi def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): R = SixCircle.getHKL(wavelength, UB, gamma=gamma, delta=delta, theta=theta, mu=mu, chi=chi, phi=phi) shape = gamma.size, delta.size H = R[0, :].reshape(shape) K = R[1, :].reshape(shape) L = R[2, :].reshape(shape) return (H, K, L) def get_axis_labels(self): return 'H', 'K', 'L' class HKProjection(HKLProjection): def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): H, K, L = super(HKProjection, self).project(wavelength, UB, gamma, delta, theta, mu, chi, phi) return (H, K) def get_axis_labels(self): return 'H', 'K' class specularangles(backend.ProjectionBase): def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): delta, gamma = numpy.meshgrid(delta, gamma) mu *= numpy.pi/180 delta *= numpy.pi/180 gamma *= numpy.pi/180 chi *= numpy.pi/180 phi *= numpy.pi/180 theta *= numpy.pi/180 def mat(u, th): ux, uy, uz = u[0], u[1], u[2] sint = numpy.sin(th) cost = numpy.cos(th) mcost = (1 - numpy.cos(th)) return numpy.matrix([[cost + ux**2 * mcost, ux * uy * mcost - uz * sint, ux * uz * mcost + uy * sint], [uy * ux * mcost + uz * sint, cost + uy**2 * mcost, uy * uz - ux * sint], [uz * ux * mcost - uy * sint, uz * uy * mcost + ux * sint, cost + uz**2 * mcost]]) def rot(vx, vy, vz, u, th): R = mat(u, th) return R[0, 0] * vx + R[0, 1] * vy + R[0, 2] * vz, R[1, 0] * vx + R[1, 1] * vy + R[1, 2] * vz, R[2, 0] * vx + R[2, 1] * vy + R[2, 2] * vz #what are the angles of kin and kout in the sample frame? 
#angles in the hexapod frame koutx, kouty, koutz = numpy.sin(- numpy.pi / 2 + gamma) * numpy.cos(delta), numpy.sin(- numpy.pi / 2 + gamma) * numpy.sin(delta), numpy.cos(- numpy.pi / 2 + gamma) kinx, kiny, kinz = numpy.sin(numpy.pi / 2 - mu), 0, numpy.cos(numpy.pi / 2 - mu) #now we rotate the frame around hexapod rotation th xaxis = numpy.array(rot(1, 0, 0, numpy.array([0, 0, 1]), theta)) yaxis = numpy.array(rot(0, 1, 0, numpy.array([0, 0, 1]), theta)) #first we rotate the sample around the xaxis koutx, kouty, koutz = rot(koutx, kouty, koutz, xaxis, chi) kinx, kiny, kinz = rot(kinx, kiny, kinz, xaxis, chi) yaxis = numpy.array(rot(yaxis[0], yaxis[1], yaxis[2], xaxis, chi)) # we also have to rotate the yaxis #then we rotate the sample around the yaxis koutx, kouty, koutz = rot(koutx, kouty, koutz, yaxis, phi) kinx, kiny, kinz = rot(kinx, kiny, kinz, yaxis, phi) #to calculate the equivalent gamma, delta and mu in the sample frame we rotate the frame around the sample z which is 0,0,1 back = numpy.arctan2(kiny, kinx) koutx, kouty, koutz = rot(koutx, kouty, koutz, numpy.array([0, 0, 1]), -back) kinx, kiny, kinz = rot(kinx, kiny, kinz, numpy.array([0, 0, 1]), -back) mu = numpy.arctan2(kinz, kinx) * numpy.ones_like(delta) delta = numpy.pi - numpy.arctan2(kouty, koutx) gamma = numpy.pi - numpy.arctan2(koutz, koutx) delta[delta > numpy.pi] -= 2 * numpy.pi gamma[gamma > numpy.pi] -= 2 * numpy.pi mu *= 1 / numpy.pi * 180 delta *= 1 / numpy.pi * 180 gamma *= 1 / numpy.pi * 180 return (gamma - mu, gamma + mu, delta) def get_axis_labels(self): return 'g-m', 'g+m', 'delta' class ThetaLProjection(backend.ProjectionBase): # arrays: gamma, delta # scalars: theta, mu, chi, phi def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): R = SixCircle.getHKL(wavelength, UB, gamma=gamma, delta=delta, theta=theta, mu=mu, chi=chi, phi=phi) shape = gamma.size, delta.size L = R[2, :].reshape(shape) theta_array = numpy.ones_like(L) * theta return (theta_array, L) def get_axis_labels(self): return 'Theta', 'L' class QProjection(backend.ProjectionBase): def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): shape = gamma.size, delta.size sixc = SixCircle.SixCircle() sixc.setLambda(wavelength) sixc.setUB(UB) R = sixc.getQSurface(gamma=gamma, delta=delta, theta=theta, mu=mu, chi=chi, phi=phi) qz = R[0, :].reshape(shape) qy = R[1, :].reshape(shape) qx = R[2, :].reshape(shape) return (qz, qy, qx) def get_axis_labels(self): return 'qx', 'qy', 'qz' class SphericalQProjection(QProjection): def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): qz, qy, qx = super(SphericalQProjection, self).project(wavelength, UB, gamma, delta, theta, mu, chi, phi) q = numpy.sqrt(qx**2 + qy**2 + qz**2) theta = numpy.arccos(qz / q) phi = numpy.arctan2(qy, qx) return (q, theta, phi) def get_axis_labels(self): return 'Q', 'Theta', 'Phi' class CylindricalQProjection(QProjection): def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): qz, qy, qx = super(CylindricalQProjection, self).project(wavelength, UB, gamma, delta, theta, mu, chi, phi) qpar = numpy.sqrt(qx**2 + qy**2) phi = numpy.arctan2(qy, qx) return (qpar, qz, phi) def get_axis_labels(self): return 'qpar', 'qz', 'Phi' class nrQProjection(QProjection): def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): qx, qy, qz = super(nrQProjection, self).project(wavelength, UB, gamma, delta, 0, mu, chi, phi) return (qx, qy, qz) def get_axis_labels(self): return 'qx', 'qy', 'qz' class TwoThetaProjection(SphericalQProjection): def 
project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): q, theta, phi = super(TwoThetaProjection, self).project(wavelength, UB, gamma, delta, theta, mu, chi, phi) return 2 * numpy.arcsin(q * wavelength / (4 * numpy.pi)) / numpy.pi * 180, # note: we need to return a 1-tuple? def get_axis_labels(self): return 'TwoTheta' class Qpp(nrQProjection): def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): qx, qy, qz = super(Qpp, self).project(wavelength, UB, gamma, delta, theta, mu, chi, phi) qpar = numpy.sqrt(qx**2 + qy**2) qpar[numpy.sign(qx) == -1] *= -1 return (qpar, qz) def get_axis_labels(self): return 'Qpar', 'Qz' class GammaDeltaTheta(HKLProjection): # just passing on the coordinates, makes it easy to accurately test the theta correction def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): delta, gamma = numpy.meshgrid(delta, gamma) theta = theta * numpy.ones_like(delta) return (gamma, delta, theta) def get_axis_labels(self): return 'Gamma', 'Delta', 'Theta' class GammaDelta(HKLProjection): # just passing on the coordinates, makes it easy to accurately test the theta correction def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): delta, gamma = numpy.meshgrid(delta, gamma) return (gamma, delta) def get_axis_labels(self): return 'Gamma', 'Delta' class GammaDeltaMu(HKLProjection): # just passing on the coordinates, makes it easy to accurately test the theta correction def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): delta, gamma = numpy.meshgrid(delta, gamma) mu = mu * numpy.ones_like(delta) return (gamma, delta, mu) def get_axis_labels(self): return 'Gamma', 'Delta', 'Mu' class QTransformation(QProjection): def project(self, wavelength, UB, gamma, delta, theta, mu, chi, phi): qx, qy, qz = super(QTransformation, self).project(wavelength, UB, gamma, delta, theta, mu, chi, phi) M = self.config.matrix q1 = qx * M[0] + qy * M[1] + qz * M[2] q2 = qx * M[3] + qy * M[4] + qz * M[5] q3 = qx * M[6] + qy * M[7] + qz * M[8] return (q1, q2, q3) def get_axis_labels(self): return 'q1', 'q2', 'q3' def parse_config(self, config): super(QTransformation, self).parse_config(config) self.config.matrix = util.parse_tuple(config.pop('matrix'), length=9, type=float) class ID03Input(backend.InputBase): # OFFICIAL API dbg_scanno = None dbg_pointno = None def generate_jobs(self, command): scans = util.parse_multi_range(','.join(command).replace(' ', ',')) if not len(scans): sys.stderr.write('error: no scans selected, nothing to do\n') for scanno in scans: util.status('processing scan {0}...'.format(scanno)) if self.config.wait_for_data: for job in self.get_delayed_jobs(scanno): yield job else: scan = self.get_scan(scanno) if self.config.pr: pointcount = self.config.pr[1] - self.config.pr[0] + 1 start = self.config.pr[0] else: start = 0 try: pointcount = scan.lines() except specfile.error: # no points continue next(self.get_images(scan, 0, pointcount-1, dry_run=True)) # dryrun if pointcount > self.config.target_weight * 1.4: for s in util.chunk_slicer(pointcount, self.config.target_weight): yield backend.Job(scan=scanno, firstpoint=start+s.start, lastpoint=start+s.stop-1, weight=s.stop-s.start) else: yield backend.Job(scan=scanno, firstpoint=start, lastpoint=start+pointcount-1, weight=pointcount) def get_delayed_jobs(self, scanno): scan = self.get_delayed_scan(scanno) if self.config.pr: firstpoint, lastpoint = self.config.pr # firstpoint is the first index to be included, lastpoint the last index to be included. 
else: firstpoint, lastpoint = 0, self.target(scan) - 1 pointcount = lastpoint - firstpoint + 1 if self.is_zap(scan): # wait until the scan is finished. if not self.wait_for_points(scanno, self.target(scan), timeout=self.config.timeout): # wait for last datapoint for s in util.chunk_slicer(pointcount, self.config.target_weight): yield backend.Job(scan=scanno, firstpoint=firstpoint+s.start, lastpoint=firstpoint+s.stop-1, weight=s.stop-s.start) else: raise errors.BackendError('Image collection timed out. Zapscan was probably aborted') elif lastpoint >= 0: # scanlength is known for s in util.chunk_slicer(pointcount, self.config.target_weight): if self.wait_for_points(scanno, firstpoint + s.stop, timeout=self.config.timeout): stop = self.get_scan(scanno).lines() yield backend.Job(scan=scanno, firstpoint=firstpoint+s.start, lastpoint=stop-1, weight=s.stop-s.start) break else: yield backend.Job(scan=scanno, firstpoint=firstpoint+s.start, lastpoint=firstpoint+s.stop-1, weight=s.stop-s.start) else: # scanlength is unknown step = int(self.config.target_weight / 1.4) for start, stop in zip(itertools.count(0, step), itertools.count(step, step)): if self.wait_for_points(scanno, stop, timeout=self.config.timeout): stop = self.get_scan(scanno).lines() yield backend.Job(scan=scanno, firstpoint=start, lastpoint=stop-1, weight=stop-start) break else: yield backend.Job(scan=scanno, firstpoint=start, lastpoint=stop-1, weight=stop-start) def process_job(self, job): super(ID03Input, self).process_job(job) scan = self.get_scan(job.scan) self.metadict = dict() try: scanparams = self.get_scan_params(scan) # wavelength, UB pointparams = self.get_point_params(scan, job.firstpoint, job.lastpoint) # 2D array of diffractometer angles + mon + transm images = self.get_images(scan, job.firstpoint, job.lastpoint) # iterator! for pp, image in zip(pointparams, images): yield self.process_image(scanparams, pp, image) util.statuseol() except Exception as exc: exc.args = errors.addmessage(exc.args, ', An error occured for scan {0} at point {1}. See above for more information'.format(self.dbg_scanno, self.dbg_pointno)) raise self.metadata.add_section('id03_backend', self.metadict) def parse_config(self, config): super(ID03Input, self).parse_config(config) self.config.xmask = util.parse_multi_range(config.pop('xmask', None)) # Optional, select a subset of the image range in the x direction. all by default self.config.ymask = util.parse_multi_range(config.pop('ymask', None)) # Optional, select a subset of the image range in the y direction. all by default self.config.specfile = config.pop('specfile') # Location of the specfile self.config.imagefolder = config.pop('imagefolder', None) # Optional, takes specfile folder tag by default self.config.pr = config.pop('pr', None) # Optional, all range by default self.config.background = config.pop('background', None) # Optional, if supplied a space of this image is constructed self.config.th_offset = float(config.pop('th_offset', 0)) # Optional; Only used in zapscans, zero by default. self.config.wavelength = config.pop('wavelength', None) # Optional; Overrides wavelength from specfile. 
if self.config.wavelength is not None: self.config.wavelength = float(self.config.wavelength) if self.config.xmask is None: self.config.xmask = slice(None) if self.config.ymask is None: self.config.ymask = slice(None) self.config.maskmatrix = load_matrix(config.pop('maskmatrix', None)) # Optional, if supplied pixels where the mask is 0 will be removed if self.config.pr: self.config.pr = util.parse_tuple(self.config.pr, length=2, type=int) self.config.sdd = float(config.pop('sdd')) # sample to detector distance (mm) self.config.pixelsize = util.parse_tuple(config.pop('pixelsize'), length=2, type=float) # pixel size x/y (mm) (same dimension as sdd) self.config.wait_for_data = util.parse_bool(config.pop('wait_for_data', 'false')) # Optional, if true wait until the data appears self.config.timeout = int(config.pop('timeout', 180)) # Optional, how long the script wait until it assumes the scan is not continuing def get_destination_options(self, command): if not command: return False command = ','.join(command).replace(' ', ',') scans = util.parse_multi_range(command) return dict(first=min(scans), last=max(scans), range=','.join(str(scan) for scan in scans)) # CONVENIENCE FUNCTIONS def get_scan(self, scannumber): spec = specfilewrapper.Specfile(self.config.specfile) return spec.select('{0}.1'.format(scannumber)) def get_delayed_scan(self, scannumber, timeout=None): delay = util.loop_delayer(5) start = time.time() while 1: try: return self.get_scan(scannumber) # reload entire specfile except specfile.error: if timeout is not None and time.time() - start > timeout: raise errors.BackendError('Scan timed out. There is no data to process') else: util.status('waiting for scan {0}...'.format(scannumber)) next(delay) def wait_for_points(self, scannumber, stop, timeout=None): delay = util.loop_delayer(1) start = time.time() while 1: scan = self.get_scan(scannumber) try: if scan.lines() >= stop: next(delay) # time delay between specfile and edf file return False except specfile.error: pass finally: next(delay) util.status('waiting for scan {0}, point {1}...'.format(scannumber, stop)) if (timeout is not None and time.time() - start > timeout) or self.is_aborted(scan): try: util.statusnl('scan {0} aborted at point {1}'.format(scannumber, scan.lines())) return True except specfile.error: raise errors.BackendError('Scan was aborted before images were collected. 
There is no data to process') def target(self, scan): if any(tuple(scan.command().startswith(pattern) for pattern in ['hklscan', 'a2scan', 'ascan', 'ringscan'])): return int(scan.command().split()[-2]) + 1 elif scan.command().startswith('mesh'): return int(scan.command().split()[-6]) * int(scan.command().split()[-2]) + 1 elif scan.command().startswith('loopscan'): return int(scan.command().split()[-3]) elif scan.command().startswith('xascan'): params = numpy.array(scan.command().split()[-6:]).astype(float) return int(params[2] + 1 + (params[4] - 1) / params[5] * params[2]) elif self.is_zap(scan): return int(scan.command().split()[-2]) else: return -1 @staticmethod def is_zap(scan): return scan.command().startswith('zap') @staticmethod def is_aborted(scan): for line in scan.header('C'): if 'Scan aborted' in line: return True return False def find_edfs(self, pattern, scanno): files = glob.glob(pattern) ret = {} for file in files: try: filename = os.path.basename(file).split('.')[0] scan, point, image = filename.split('_')[-3:] scan, point, image = int(scan), int(point), int(image) if scan == scanno and point not in list(ret.keys()): ret[point] = file except ValueError: continue return ret @staticmethod def apply_mask(data, xmask, ymask): roi = data[ymask, :] return roi[:, xmask] def get_wavelength(self, G): for line in G: if line.startswith('#G4'): return float(line.split(' ')[4]) return None # MAIN LOGIC def get_scan_params(self, scan): self.dbg_scanno = scan.number() if self.is_zap(scan): # zapscans don't contain the UB matrix, this needs to be fixed at ID03 scanno = scan.number() UB = None while 1: # look back in spec file to locate a UB matrix try: ubscan = self.get_scan(scanno) except specfilewrapper.specfile.error: break try: UB = numpy.array(ubscan.header('G')[2].split(' ')[-9:], dtype=numpy.float) except: scanno -= 1 else: break if UB is None: # fall back to UB matrix from the configfile if not self.config.UB: raise errors.ConfigError('UB matrix must be specified in configuration file when processing zapscans') UB = numpy.array(self.config.UB) else: UB = numpy.array(scan.header('G')[2].split(' ')[-9:], dtype=numpy.float) if self.config.wavelength is None: wavelength = self.get_wavelength(scan.header('G')) if wavelength is None or wavelength == 0: raise errors.BackendError('No or incorrect wavelength specified in the specfile. Please add wavelength to the configfile in the input section') else: wavelength = self.config.wavelength self.metadict['UB'] = UB self.metadict['wavelength'] = wavelength return wavelength, UB def get_images(self, scan, first, last, dry_run=False): if self.config.background: if not os.path.exists(self.config.background): raise errors.FileError('could not find background file {0}'.format(self.config.background)) if dry_run: yield else: edf = EdfFile.EdfFile(self.config.background) for i in range(first, last+1): self.dbg_pointno = i yield edf.GetData(0) else: if self.is_zap(scan): scanheaderC = scan.header('C') zapscanno = int(scanheaderC[2].split(' ')[-1]) # is different from scanno should be changed in spec! 
try: uccdtagline = scanheaderC[0] UCCD = os.path.split(uccdtagline.split()[-1]) except: print('warning: UCCD tag not found, use imagefolder for proper file specification') UCCD = [] pattern = self._get_pattern(UCCD) matches = self.find_edfs(pattern, zapscanno) if 0 not in matches: raise errors.FileError('could not find matching edf for zapscannumber {0} using pattern {1}'.format(zapscanno, pattern)) if dry_run: yield else: edf = EdfFile.EdfFile(matches[0]) for i in range(first, last+1): self.dbg_pointno = i yield edf.GetData(i) else: try: uccdtagline = scan.header('UCCD')[0] UCCD = os.path.split(os.path.dirname(uccdtagline.split()[-1])) except: print('warning: UCCD tag not found, use imagefolder for proper file specification') UCCD = [] pattern = self._get_pattern(UCCD) matches = self.find_edfs(pattern, scan.number()) if set(range(first, last + 1)) > set(matches.keys()): raise errors.FileError("incorrect number of matches for scan {0} using pattern {1}".format(scan.number(), pattern)) if dry_run: yield else: for i in range(first, last+1): self.dbg_pointno = i edf = EdfFile.EdfFile(matches[i]) yield edf.GetData(0) def _get_pattern(self, UCCD): imagefolder = self.config.imagefolder if imagefolder: try: imagefolder = imagefolder.format(UCCD=UCCD, rUCCD=list(reversed(UCCD))) except Exception as e: raise errors.ConfigError("invalid 'imagefolder' specification '{0}': {1}".format(self.config.imagefolder, e)) else: if not os.path.exists(imagefolder): raise errors.ConfigError("invalid 'imagefolder' specification '{0}'. Path {1} does not exist".format(self.config.imagefolder, imagefolder)) else: imagefolder = os.path.join(*UCCD) if not os.path.exists(imagefolder): raise errors.ConfigError("invalid UCCD tag '{0}'. The UCCD tag in the specfile does not point to an existing folder. Specify the imagefolder in the configuration file.".format(imagefolder)) return os.path.join(imagefolder, '*') class EH1(ID03Input): monitor_counter = 'mon' def parse_config(self, config): super(EH1, self).parse_config(config) self.config.centralpixel = util.parse_tuple(config.pop('centralpixel'), length=2, type=int) # x,y self.config.hr = config.pop('hr', None) # Optional, hexapod rotations in miliradians. At the entered value the sample is assumed flat, if not entered the sample is assumed flat at the spec values. self.config.UB = config.pop('ub', None) # Optional, takes specfile matrix by default if self.config.UB: self.config.UB = util.parse_tuple(self.config.UB, length=9, type=float) if self.config.hr: self.config.hr = util.parse_tuple(self.config.hr, length=2, type=float) def process_image(self, scanparams, pointparams, image): gamma, delta, theta, chi, phi, mu, mon, transm, hrx, hry = pointparams wavelength, UB = scanparams weights = numpy.ones_like(image) if self.config.hr: zerohrx, zerohry = self.config.hr chi = (hrx - zerohrx) / numpy.pi * 180. / 1000 phi = (hry - zerohry) / numpy.pi * 180. / 1000 if self.config.background: data = image / mon else: data = image / mon / transm if mon == 0: raise errors.BackendError('Monitor is zero, this results in empty output. Scannumber = {0}, pointnumber = {1}. 
Did you forget to open the shutter?'.format(self.dbg_scanno, self.dbg_pointno)) util.status('{4}| gamma: {0}, delta: {1}, theta: {2}, mu: {3}'.format(gamma, delta, theta, mu, time.ctime(time.time()))) # pixels to angles pixelsize = numpy.array(self.config.pixelsize) sdd = self.config.sdd app = numpy.arctan(pixelsize / sdd) * 180 / numpy.pi centralpixel = self.config.centralpixel # (column, row) = (delta, gamma) gamma_range = -app[1] * (numpy.arange(data.shape[1]) - centralpixel[1]) + gamma delta_range = app[0] * (numpy.arange(data.shape[0]) - centralpixel[0]) + delta # masking if self.config.maskmatrix is not None: if self.config.maskmatrix.shape != data.shape: raise errors.BackendError('The mask matrix does not have the same shape as the images') weights *= self.config.maskmatrix gamma_range = gamma_range[self.config.ymask] delta_range = delta_range[self.config.xmask] intensity = self.apply_mask(data, self.config.xmask, self.config.ymask) weights = self.apply_mask(weights, self.config.xmask, self.config.ymask) #polarisation correction delta_grid, gamma_grid = numpy.meshgrid(delta_range, gamma_range) Pver = 1 - numpy.sin(delta_grid * numpy.pi / 180.)**2 * numpy.cos(gamma_grid * numpy.pi / 180.)**2 intensity /= Pver return intensity, weights, (wavelength, UB, gamma_range, delta_range, theta, mu, chi, phi) def get_point_params(self, scan, first, last): sl = slice(first, last+1) GAM, DEL, TH, CHI, PHI, MU, MON, TRANSM, HRX, HRY = list(range(10)) params = numpy.zeros((last - first + 1, 10)) # gamma delta theta chi phi mu mon transm params[:, CHI] = scan.motorpos('Chi') params[:, PHI] = scan.motorpos('Phi') try: params[:, HRX] = scan.motorpos('hrx') params[:, HRY] = scan.motorpos('hry') except: raise errors.BackendError('The specfile does not accept hrx and hry as a motor label. Have you selected the right hutch? Scannumber = {0}, pointnumber = {1}'.format(self.dbg_scanno, self.dbg_pointno)) if self.is_zap(scan): if 'th' in scan.alllabels(): th = scan.datacol('th')[sl] if len(th) > 1: sign = numpy.sign(th[1] - th[0]) else: sign = 1 # correction for difference between back and forth in th motor params[:, TH] = th + sign * self.config.th_offset else: params[:, TH] = scan.motorpos('Theta') params[:, GAM] = scan.motorpos('Gam') params[:, DEL] = scan.motorpos('Delta') params[:, MU] = scan.motorpos('Mu') params[:, MON] = scan.datacol('zap_mon')[sl] transm = scan.datacol('zap_transm') transm[-1] = transm[-2] # bug in specfile params[:, TRANSM] = transm[sl] else: if 'hrx' in scan.alllabels(): params[:, HRX] = scan.datacol('hrx')[sl] if 'hry' in scan.alllabels(): params[:, HRY] = scan.datacol('hry')[sl] params[:, TH] = scan.datacol('thcnt')[sl] params[:, GAM] = scan.datacol('gamcnt')[sl] params[:, DEL] = scan.datacol('delcnt')[sl] try: params[:, MON] = scan.datacol(self.monitor_counter)[sl] # differs in EH1/EH2 except: raise errors.BackendError('The specfile does not accept {2} as a monitor label. Have you selected the right hutch? 
Scannumber = {0}, pointnumber = {1}'.format(self.dbg_scanno, self.dbg_pointno, self.monitor_counter)) params[:, TRANSM] = scan.datacol('transm')[sl] params[:, MU] = scan.datacol('mucnt')[sl] return params class EH2(ID03Input): monitor_counter = 'Monitor' def parse_config(self, config): super(EH2, self).parse_config(config) self.config.centralpixel = util.parse_tuple(config.pop('centralpixel'), length=2, type=int) # x,y self.config.UB = config.pop('ub', None) # Optional, takes specfile matrix by default if self.config.UB: self.config.UB = util.parse_tuple(self.config.UB, length=9, type=float) def process_image(self, scanparams, pointparams, image): gamma, delta, theta, chi, phi, mu, mon, transm = pointparams wavelength, UB = scanparams weights = numpy.ones_like(image) if self.config.background: data = image / mon else: data = image / mon / transm if mon == 0: raise errors.BackendError('Monitor is zero, this results in empty output. Scannumber = {0}, pointnumber = {1}. Did you forget to open the shutter?'.format(self.dbg_scanno, self.dbg_pointno)) util.status('{4}| gamma: {0}, delta: {1}, theta: {2}, mu: {3}'.format(gamma, delta, theta, mu, time.ctime(time.time()))) # area correction sdd = self.config.sdd / numpy.cos(gamma * numpy.pi / 180) data *= (self.config.sdd / sdd)**2 # pixels to angles pixelsize = numpy.array(self.config.pixelsize) app = numpy.arctan(pixelsize / sdd) * 180 / numpy.pi centralpixel = self.config.centralpixel # (row, column) = (gamma, delta) gamma_range = - 1 * app[0] * (numpy.arange(data.shape[0]) - centralpixel[0]) + gamma delta_range = app[1] * (numpy.arange(data.shape[1]) - centralpixel[1]) + delta # masking if self.config.maskmatrix is not None: if self.config.maskmatrix.shape != data.shape: raise errors.BackendError('The mask matrix does not have the same shape as the images') weights *= self.config.maskmatrix gamma_range = gamma_range[self.config.xmask] delta_range = delta_range[self.config.ymask] intensity = self.apply_mask(data, self.config.xmask, self.config.ymask) weights = self.apply_mask(weights, self.config.xmask, self.config.ymask) intensity = numpy.fliplr(intensity) intensity = numpy.rot90(intensity) weights = numpy.fliplr(weights) # TODO: should be done more efficiently. Will prob change with new HKL calculations weights = numpy.rot90(weights) #polarisation correction delta_grid, gamma_grid = numpy.meshgrid(delta_range, gamma_range) Phor = 1 - (numpy.sin(mu * numpy.pi / 180.) * numpy.sin(delta_grid * numpy.pi / 180.) * numpy.cos(gamma_grid * numpy.pi / 180.) + numpy.cos(mu * numpy.pi / 180.) 
* numpy.sin(gamma_grid * numpy.pi / 180.))**2 intensity /= Phor return intensity, weights, (wavelength, UB, gamma_range, delta_range, theta, mu, chi, phi) def get_point_params(self, scan, first, last): sl = slice(first, last+1) GAM, DEL, TH, CHI, PHI, MU, MON, TRANSM = list(range(8)) params = numpy.zeros((last - first + 1, 8)) # gamma delta theta chi phi mu mon transm params[:, CHI] = scan.motorpos('Chi') params[:, PHI] = scan.motorpos('Phi') if self.is_zap(scan): if 'th' in scan.alllabels(): th = scan.datacol('th')[sl] if len(th) > 1: sign = numpy.sign(th[1] - th[0]) else: sign = 1 # correction for difference between back and forth in th motor params[:, TH] = th + sign * self.config.th_offset else: params[:, TH] = scan.motorpos('Theta') params[:, GAM] = scan.motorpos('Gamma') params[:, DEL] = scan.motorpos('Delta') params[:, MU] = scan.motorpos('Mu') params[:, MON] = scan.datacol('zap_mon')[sl] transm = scan.datacol('zap_transm') transm[-1] = transm[-2] # bug in specfile params[:, TRANSM] = transm[sl] else: params[:, TH] = scan.datacol('thcnt')[sl] params[:, GAM] = scan.datacol('gamcnt')[sl] params[:, DEL] = scan.datacol('delcnt')[sl] try: params[:, MON] = scan.datacol(self.monitor_counter)[sl] # differs in EH1/EH2 except: raise errors.BackendError('The specfile does not accept {2} as a monitor label. Have you selected the right hutch? Scannumber = {0}, pointnumber = {1}'.format(self.dbg_scanno, self.dbg_pointno, self.monitor_counter)) params[:, TRANSM] = scan.datacol('transm')[sl] params[:, MU] = scan.datacol('mucnt')[sl] return params class GisaxsDetector(ID03Input): monitor_counter = 'mon' def process_image(self, scanparams, pointparams, image): ccdy, ccdz, theta, chi, phi, mu, mon, transm = pointparams weights = numpy.ones_like(image) wavelength, UB = scanparams if self.config.background: data = image / mon else: data = image / mon / transm if mon == 0: raise errors.BackendError('Monitor is zero, this results in empty output. Scannumber = {0}, pointnumber = {1}. 
Did you forget to open the shutter?'.format(self.dbg_scanno, self.dbg_pointno)) util.status('{4}| ccdy: {0}, ccdz: {1}, theta: {2}, mu: {3}'.format(ccdy, ccdz, theta, mu, time.ctime(time.time()))) # pixels to angles pixelsize = numpy.array(self.config.pixelsize) sdd = self.config.sdd directbeam = (self.config.directbeam[0] - (ccdy - self.config.directbeam_coords[0]) * pixelsize[0], self.config.directbeam[1] - (ccdz - self.config.directbeam_coords[1]) * pixelsize[1]) gamma_distance = - pixelsize[1] * (numpy.arange(data.shape[1]) - directbeam[1]) delta_distance = - pixelsize[0] * (numpy.arange(data.shape[0]) - directbeam[0]) gamma_range = numpy.arctan2(gamma_distance, sdd) / numpy.pi * 180 - mu delta_range = numpy.arctan2(delta_distance, sdd) / numpy.pi * 180 #sample pixel distance spd = numpy.sqrt(gamma_distance**2 + delta_distance**2 + sdd**2) data *= spd**2 / sdd # masking if self.config.maskmatrix is not None: if self.config.maskmatrix.shape != data.shape: raise errors.BackendError('The mask matrix does not have the same shape as the images') weights *= self.config.maskmatrix gamma_range = gamma_range[self.config.ymask] delta_range = delta_range[self.config.xmask] intensity = self.apply_mask(data, self.config.xmask, self.config.ymask) weights = self.apply_mask(weights, self.config.xmask, self.config.ymask) return intensity, weights, (wavelength, UB, gamma_range, delta_range, theta, mu, chi, phi) def parse_config(self, config): super(GisaxsDetector, self).parse_config(config) self.config.directbeam = util.parse_tuple(config.pop('directbeam'), length=2, type=int) self.config.directbeam_coords = util.parse_tuple(config.pop('directbeam_coords'), length=2, type=float) # Coordinates of ccdy and ccdz at the direct beam position def get_point_params(self, scan, first, last): sl = slice(first, last+1) CCDY, CCDZ, TH, CHI, PHI, MU, MON, TRANSM = list(range(8)) params = numpy.zeros((last - first + 1, 8)) # gamma delta theta chi phi mu mon transm params[:, CHI] = scan.motorpos('Chi') params[:, PHI] = scan.motorpos('Phi') params[:, CCDY] = scan.motorpos('ccdy') params[:, CCDZ] = scan.motorpos('ccdz') params[:, TH] = scan.datacol('thcnt')[sl] try: params[:, MON] = scan.datacol(self.monitor_counter)[sl] # differs in EH1/EH2 except: raise errors.BackendError('The specfile does not accept {2} as a monitor label. Have you selected the right hutch? Scannumber = {0}, pointnumber = {1}'.format(self.dbg_scanno, self.dbg_pointno, self.monitor_counter)) params[:, TRANSM] = scan.datacol('transm')[sl] params[:, MU] = scan.datacol('mucnt')[sl] return params def find_edfs(self, pattern, scanno): files = glob.glob(pattern) ret = {} for file in files: try: filename = os.path.basename(file).split('.')[0] scan, point = filename.split('_')[-2:] scan, point = int(scan), int(point) if scan == scanno and point not in list(ret.keys()): ret[point] = file except ValueError: continue return ret def load_matrix(filename): if filename == None: return None if os.path.exists(filename): ext = os.path.splitext(filename)[-1] if ext == '.txt': return numpy.array(numpy.loadtxt(filename), dtype=numpy.bool) elif ext == '.npy': return numpy.array(numpy.load(filename), dtype=numpy.bool) elif ext == '.edf': return numpy.array(EdfFile.EdfFile(filename).getData(0), dtype=numpy.bool) else: raise ValueError('unknown extension {0}, unable to load matrix!\n'.format(ext)) else: raise IOError('filename: {0} does not exist. 
Can not load matrix'.format(filename)) binoculars-0.0.4/binoculars/backends/id03_xu.py000066400000000000000000000243051343276063200214260ustar00rootroot00000000000000""" BINocular backend for beamline ID03:EH2 This backend should serve as a basic example of a backend based on xrayutilities [1]. It still uses PyMCA for parsing the spec,edf files. The 'original' ID03 backend was used as a template. Created on 2014-10-16 [1] http://xrayutilities.sourceforge.net/ author: Dominik Kriegner (dominik.kriegner@gmail.com) """ import sys import os import glob import numpy import xrayutilities as xu from PyMca import specfile #python3 support PY3 = sys.version_info > (3,) if PY3: pass else: from itertools import izip as zip try: from PyMca import specfilewrapper, EdfFile except ImportError: from PyMca.PyMcaIO import specfilewrapper, EdfFile from .. import backend, errors, util class HKLProjection(backend.ProjectionBase): # scalars: mu, theta, [chi, phi, "omitted"] delta, gamR, gamT, ty, wavelength # 3x3 matrix: UB def project(self, mu, theta, delta, gamR, gamT, ty, wavelength, UB, qconv): qconv.wavelength = wavelength h, k, l = qconv.area(mu, theta, mu, delta, ty, gamT, gamR, UB=UB.reshape((3,3))) return (h, k, l) def get_axis_labels(self): return 'H', 'K', 'L' class HKProjection(HKLProjection): def project(self, mu, theta, delta, gamR, gamT, ty, wavelength, UB, qconv): H, K, L = super(HKProjection, self).project(mu, theta, delta, gamR, gamT, ty, wavelength, UB, qconv) return (H, K) def get_axis_labels(self): return 'H', 'K' class QProjection(backend.ProjectionBase): def project(self, mu, theta, delta, gamR, gamT, ty, wavelength, UB, qconv): qconv.wavelength = wavelength qx, qy, qz = qconv.area(mu, theta, mu, delta, ty, gamT, gamR, UB=numpy.identity(3)) return (qx, qy, qz) def get_axis_labels(self): return 'qx', 'qy', 'qz' class ID03Input(backend.InputBase): # OFFICIAL API def generate_jobs(self, command): scans = util.parse_multi_range(','.join(command).replace(' ', ',')) if not len(scans): sys.stderr.write('error: no scans selected, nothing to do\n') for scanno in scans: scan = self.get_scan(scanno) try: pointcount = scan.lines() except specfile.error: # no points continue next(self.get_images(scan, 0, pointcount-1, dry_run=True))# dryrun if self.config.target_weight and pointcount > self.config.target_weight * 1.4: for s in util.chunk_slicer(pointcount, self.config.target_weight): yield backend.Job(scan=scanno, firstpoint=s.start, lastpoint=s.stop-1, weight=s.stop-s.start) else: yield backend.Job(scan=scanno, firstpoint=0, lastpoint=pointcount-1, weight=pointcount) def process_job(self, job): super(ID03Input, self).process_job(job) scan = self.get_scan(job.scan) scanparams = self.get_scan_params(scan) # wavelength, UB pointparams = self.get_point_params(scan, job.firstpoint, job.lastpoint) # 1D array of diffractometer angles + mon + transm images = self.get_images(scan, job.firstpoint, job.lastpoint) # iterator! 
for pp, image in zip(pointparams, images): yield self.process_image(scanparams, pp, image) def parse_config(self, config): super(ID03Input, self).parse_config(config) self.config.xmask = util.parse_multi_range(config.pop('xmask')) self.config.ymask = util.parse_multi_range(config.pop('ymask')) self.config.specfile = config.pop('specfile') self.config.imagefolder = config.pop('imagefolder', None) self.config.UB = config.pop('ub', None) if self.config.UB: self.config.UB = util.parse_tuple(self.config.UB, length=9, type=float) self.config.sdd = float(config.pop('sdd')) self.config.pixelsize = util.parse_tuple(config.pop('pixelsize'), length=2, type=float) self.config.centralpixel = util.parse_tuple(config.pop('centralpixel'), length=2, type=int) def get_destination_options(self, command): if not command: return False command = ','.join(command).replace(' ', ',') scans = util.parse_multi_range(command) return dict(first=min(scans), last=max(scans), range=','.join(command)) # CONVENIENCE FUNCTIONS _spec = None def get_scan(self, scannumber): if self._spec is None: self._spec = specfilewrapper.Specfile(self.config.specfile) return self._spec.select('{0}.1'.format(scannumber)) def find_edfs(self, pattern, scanno): files = glob.glob(pattern) ret = {} for file in files: try: filename = os.path.basename(file).split('.')[0] scan, point, image = filename.split('_')[-3:] scan, point, image = int(scan), int(point), int(image) if scan == scanno and point not in list(ret.keys()): ret[point] = file except ValueError: continue return ret @staticmethod def apply_mask(data, xmask, ymask): roi = data[ymask, :] return roi[:, xmask] # MAIN LOGIC def get_scan_params(self, scan): UB = numpy.array(scan.header('G')[2].split(' ')[-9:],dtype=numpy.float) wavelength = float(scan.header('G')[1].split(' ')[-1]) return wavelength, UB def get_images(self, scan, first, last, dry_run=False): try: uccdtagline = scan.header('UCCD')[0] UCCD = os.path.split(os.path.dirname(uccdtagline.split()[-1])) except: print('warning: UCCD tag not found, use imagefolder for proper file specification') UCCD = [] pattern = self._get_pattern(UCCD) matches = self.find_edfs(pattern, scan.number()) if set(range(first, last + 1)) > set(matches.keys()): raise errors.FileError("incorrect number of matches for scan {0} using pattern {1}".format(scan.number(), pattern)) if dry_run: yield else: for i in range(first, last+1): edf = EdfFile.EdfFile(matches[i]) yield edf.GetData(0) def _get_pattern(self,UCCD): imagefolder = self.config.imagefolder if imagefolder: try: imagefolder = imagefolder.format(UCCD=UCCD, rUCCD=list(reversed(UCCD))) except Exception as e: raise errors.ConfigError("invalid 'imagefolder' specification '{0}': {1}".format(self.config.imagefolder, e)) else: imagefolder = os.path.join(*UCCD) if not os.path.exists(imagefolder): raise ValueError("invalid 'imagefolder' specification '{0}'. 
Path {1} does not exist".format(self.config.imagefolder, imagefolder)) return os.path.join(imagefolder, '*') class EH2(ID03Input): monitor_counter = 'Monitor' # define ID03 goniometer, SIXC geometry with 2D detector mounted on a # translation-axis (distance changing with changing Gamma) # The geometry is: 1+3S+2D # sample axis mu, th, chi, phi -> here chi,phi are omitted # detector axis mu, del, gam # gam is realized by a translation along z (gamT) and rotation around x+ (gamR) qconv = xu.experiment.QConversion(['x+', 'z-'], # 'y+', 'z+' ['x+', 'z-', 'ty', 'tz', 'x+'], [0, 1, 0]) # convention for coordinate system: y downstream; z outwards; x upwards # (righthanded) # QConversion will set up the goniometer geometry. So the first argument # describes the sample rotations, the second the detector rotations and the # third the primary beam direction. ty = 600. # mm def parse_config(self, config): super(EH2, self).parse_config(config) centralpixel = self.config.centralpixel # (row, column) = (gamma, delta) # define detector parameters roi = (self.config.ymask[0], self.config.ymask[-1]+1, self.config.xmask[0], self.config.xmask[-1]+1) self.qconv.init_area('x+', 'z-', cch1=centralpixel[1], cch2=centralpixel[0], Nch1=516, Nch2=516, pwidth1=self.config.pixelsize[1], pwidth2=self.config.pixelsize[0], distance=self.config.sdd-self.ty, roi=roi) # distance sdd-600 corresponds to distance of the detector chip from # the gamR rotation axis (rest is handled by the translations ty and # gamT (along z)) print(('{:>9} {:>10} {:>9} {:>9}'.format('Mu', 'Theta', 'Delta', 'Gamma'))) def process_image(self, scanparams, pointparams, image): mu, theta, chi, phi, delta, gamma, mon, transm = pointparams wavelength, UB = scanparams data = image / mon / transm print(('{:9.4f} {:10.4f} {:9.4f} {:9.4f}'.format(mu, theta, delta, gamma))) # recalculate detector translation (which should be saved!) gamT = self.ty * numpy.tan(numpy.radians(gamma)) # masking intensity = self.apply_mask(data, self.config.xmask, self.config.ymask) # no polarization correction for the moment! return intensity, numpy.ones_like(intensity), (mu, theta, delta, gamma, gamT,#weights added to API. keeps functionality identical with wights of one self.ty, wavelength, UB, self.qconv) def get_point_params(self, scan, first, last): sl = slice(first, last+1) MU, TH, CHI, PHI, DEL, GAM, MON, TRANSM = list(range(8)) params = numpy.zeros((last - first + 1, 8)) # Mu, Theta, Chi, Phi, Delta, Gamma, MON, transm params[:, CHI] = scan.motorpos('Chi') params[:, PHI] = scan.motorpos('Phi') params[:, TH] = scan.datacol('thcnt')[sl] params[:, GAM] = scan.datacol('gamcnt')[sl] params[:, DEL] = scan.datacol('delcnt')[sl] params[:, MON] = scan.datacol(self.monitor_counter)[sl] params[:, TRANSM] = scan.datacol('transm')[sl] params[:, MU] = scan.datacol('mucnt')[sl] return params binoculars-0.0.4/binoculars/backends/io7.py000066400000000000000000000346641343276063200206620ustar00rootroot00000000000000# -*- encoding: utf-8 -*- ''' This file is part of the binoculars project. The BINoculars library is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. The BINoculars library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with the hkl library. If not, see . Copyright (C) 2012-2015 European Synchrotron Radiation Facility Grenoble, France Authors: Willem Onderwaater Jonathan Rawle ''' import sys import os import itertools import numpy import time import math import json from scipy.misc import imread import scisoftpy as dnp from scisoftpy import sin,cos from .. import backend, errors, util PY3 = sys.version_info > (3,) if PY3: from functools import reduce else: from itertools import izip as zip class HKLProjection(backend.ProjectionBase): # scalars: mu, theta, [chi, phi, "omitted"] delta, gamR, gamT, ty, wavelength # 3x3 matrix: UB def project(self, energy, UB, pixels, gamma, delta, omega, alpha, nu): # put the detector at the right position dx,dy,dz = pixels # convert angles to radians gamma, delta, alpha, omega, nu = numpy.radians((gamma, delta, alpha, omega, nu)) RGam = numpy.matrix([[1,0,0],[0,cos(gamma),-sin(gamma)],[0,sin(gamma),cos(gamma)]]) RDel = (numpy.matrix([[cos(delta),-sin(delta),0],[sin(delta),cos(delta),0],[0,0,1]])).getI() RNu = numpy.matrix([[cos(nu),0,sin(nu)],[0,1,0],[-sin(nu),0,cos(nu)]]) # calculate Cartesian coordinates for each pixel using clever matrix stuff M = numpy.mat(numpy.concatenate((dx.flatten(0), dy.flatten(0), dz.flatten(0))).reshape(3,dx.shape[0]*dx.shape[1])) XYZp = RGam * RDel * RNu * M xp = dnp.array(XYZp[0]).reshape(dx.shape) yp = dnp.array(XYZp[1]).reshape(dy.shape) zp = dnp.array(XYZp[2]).reshape(dz.shape) # don't bother with the part about slits... # Calculate effective gamma and delta for each pixel d_ds = dnp.sqrt(xp**2 + yp**2 + zp**2) Gam = dnp.arctan2(zp, yp) Del = -1 * dnp.arcsin(-xp/d_ds) # wavenumber k = 2 * math.pi / 12.398 * energy # Define the needed matrices. The notation follows the article by Bunk & # Nielsen. J.Appl.Cryst. (2004) 37, 216-222. 
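        # In matrix form, the conversion carried out below is
        #     (H, K, L)^T = UB^(-1) . (M1, M2, M3)^T
        # i.e. M1, M2 and M3 are the Cartesian components of the momentum transfer for
        # every pixel, and the inverse UB matrix maps them to reciprocal-lattice units.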
M1 = k * numpy.matrix(cos(omega) * sin(Del) - sin(omega) * (cos(alpha) * (cos(Gam) * cos(Del)-1) + sin(alpha) * sin(Gam) * cos(Del))) M2 = k * numpy.matrix(sin(omega) * sin(Del) + cos(omega) * (cos(alpha) * (cos(Gam) * cos(Del)-1) + sin(alpha) * sin(Gam) * cos(Del))) M3 = k * numpy.matrix(-sin(alpha) * (cos(Gam) * cos(Del)-1) + cos(alpha) * sin(Gam) * cos(Del)) # invert UB matrix UBi = numpy.matrix(UB).getI() # calculate HKL H = UBi[0,0]*M1 + UBi[0,1]*M2 + UBi[0,2]*M3 K = UBi[1,0]*M1 + UBi[1,1]*M2 + UBi[1,2]*M3 L = UBi[2,0]*M1 + UBi[2,1]*M2 + UBi[2,2]*M3 return (H, K, L) def get_axis_labels(self): return 'H', 'K', 'L' class GammaDelta(HKLProjection): # just passing on the coordinates, makes it easy to accurately test the theta correction def project(self, beamenergy, UB, gamma, delta, omega, alpha): delta, gamma = numpy.meshgrid(delta, gamma) return (gamma, delta) def get_axis_labels(self): return 'Gamma', 'Delta' class pixels(backend.ProjectionBase): def project(self, beamenergy, UB, gamma, delta, omega, alpha): y, x = numpy.mgrid[slice(None, gamma.shape[0]), slice(None, delta.shape[0])] return (y, x) def get_axis_labels(self): return 'y', 'x' class IO7Input(backend.InputBase): # OFFICIAL API dbg_scanno = None dbg_pointno = None def generate_jobs(self, command): scans = util.parse_multi_range(','.join(command).replace(' ', ',')) if not len(scans): sys.stderr.write('error: no scans selected, nothing to do\n') for scanno in scans: util.status('processing scan {0}...'.format(scanno)) if self.config.pr: pointcount = self.config.pr[1] - self.config.pr[0] + 1 start = self.config.pr[0] else: scan = self.get_scan(scanno) pointcount = len(scan.file) start = 0 if pointcount > self.config.target_weight * 1.4: for s in util.chunk_slicer(pointcount, self.config.target_weight): yield backend.Job(scan=scanno, firstpoint=start+s.start, lastpoint=start+s.stop-1, weight=s.stop-s.start) else: yield backend.Job(scan=scanno, firstpoint=start, lastpoint=start+pointcount-1, weight=pointcount) def process_job(self, job): super(IO7Input, self).process_job(job) scan = self.get_scan(job.scan) self.metadict = dict() try: scanparams = self.get_scan_params(scan) # wavelength, UB pointparams = self.get_point_params(scan, job.firstpoint, job.lastpoint) # 2D array of diffractometer angles + mon + transm images = self.get_images(scan, job.firstpoint, job.lastpoint) # iterator! for pp, image in zip(pointparams, images): yield self.process_image(scan, scanparams, pp, image) util.statuseol() except Exception as exc: exc.args = errors.addmessage(exc.args, ', An error occured for scan {0} at point {1}. 
See above for more information'.format(self.dbg_scanno, self.dbg_pointno)) raise self.metadata.add_section('id7_backend', self.metadict) def get_scan_params(self, scan): energy = scan.metadata.dcm1energy UB = numpy.array(json.loads(scan.metadata.diffcalc_ub)) self.metadict['UB'] = UB self.metadict['energy'] = energy return energy, UB def get_point_params(self, scan, first, last): sl = slice(first, last+1) GAM, DEL, OMG, CHI, PHI, ALF, MON, TRANSM = list(range(8)) params = numpy.zeros((last - first + 1, 8)) # gamma delta theta chi phi mu mon transm params[:, CHI] = 0 params[:, PHI] = 0 params[:, OMG] = scan['omega'][sl] params[:, GAM] = scan['gamma'][sl] params[:, DEL] = scan['delta'][sl] params[:, ALF] = scan['alpha'][sl] return params def get_images(self, scan, first, last, dry_run=False): sl = slice(first, last+1) for fn in scan.file[sl]: yield imread(self.get_imagefilename(fn)) def get_imagefilename(self, filename): if self.config.imagefolder is None: if os.path.exists(filename): return filename else: raise errors.ConfigError("image filename specified in the datafile does not exist '{0}'".format(filename)) else: head, tail = os.path.split(filename) folders = head.split('/') try: imagefolder = self.config.imagefolder.format(folders=folders, rfolders=list(reversed(folders))) except Exception as e: raise errors.ConfigError("invalid 'imagefolder' specification '{0}': {1}".format(self.config.imagefolder, e)) else: if not os.path.exists(imagefolder): raise errors.ConfigError("invalid 'imagefolder' specification '{0}'. Path {1} does not exist".format(self.config.imagefolder, imagefolder)) fn = os.path.join(imagefolder, tail) if os.path.exists(fn): return fn else: raise errors.ConfigError("image filename does not exist '{0}', either imagefolder is wrongly specified or image file does not exist".format(filename)) def parse_config(self, config): super(IO7Input, self).parse_config(config) self.config.xmask = util.parse_multi_range(config.pop('xmask', None))#Optional, select a subset of the image range in the x direction. all by default self.config.ymask = util.parse_multi_range(config.pop('ymask', None))#Optional, select a subset of the image range in the y direction. 
all by default self.config.datafilefolder = config.pop('datafilefolder')#Folder with the datafiles self.config.imagefolder = config.pop('imagefolder', None) # Optional, takes datafile folder tag by default self.config.pr = config.pop('pr', None) #Optional, all range by default if self.config.xmask is None: self.config.xmask = slice(None) if self.config.ymask is None: self.config.ymask = slice(None) if self.config.pr: self.config.pr = util.parse_tuple(self.config.pr, length=2, type=int) self.config.centralpixel = util.parse_tuple(config.pop('centralpixel'), length=2, type=int) #x,y self.config.maskmatrix = config.pop('maskmatrix', None)#Optional, if supplied pixels where the mask is 0 will be removed self.config.pixelsize = util.parse_tuple(config.pop('pixelsize'), length=2, type=float) # pixel size x/y (mm) (same dimension as sdd) def get_destination_options(self, command): if not command: return False command = ','.join(command).replace(' ', ',') scans = util.parse_multi_range(command) return dict(first=min(scans), last=max(scans), range=','.join(str(scan) for scan in scans)) # CONVENIENCE FUNCTIONS def get_scan(self, scanno): filename = os.path.join(self.config.datafilefolder, str(scanno) + '.dat') if not os.path.exists(filename): raise errors.ConfigError('datafile filename does not exist: {0}'.format(filename)) return dnp.io.load(filename) @staticmethod def apply_mask(data, xmask, ymask): roi = data[ymask, :] return roi[:, xmask] class EH2(IO7Input): def parse_config(self, config): super(IO7Input, self).parse_config(config) self.config.sdd = float(config.pop('sdd'), None)#Sample to detector distance (mm) if self.config.sdd is not None: self.config.sdd = float(self.config.sdd) def process_image(self, scan, scanparams, pointparams, image): gamma, delta, omega, chi, phi, alpha, mon, transm = pointparams#GAM, DEL, OMG, CHI, PHI, ALF, MON, TRANSM energy, UB = scanparams weights = numpy.ones_like(image) util.status('{4}| gamma: {0}, delta: {1}, omega: {2}, mu: {3}'.format(gamma, delta, omega, alpha, time.ctime(time.time()))) # pixels to angles pixelsize = numpy.array(self.config.pixelsize) if self.config.sdd is None: sdd = scan.metadata.diff1detdist else: sdd = self.config.sdd nu = scan.metadata.diff2prot centralpixel = self.config.centralpixel # (column, row) = (delta, gamma) dz = (numpy.indices(image.shape)[1] - centralpixel[1]) * pixelsize[1] dx = (numpy.indices(image.shape)[0] - centralpixel[0]) * pixelsize[0] dy = numpy.ones(image.shape) * sdd # masking if self.config.maskmatrix is not None: if self.config.maskmatrix.shape != data.shape: raise errors.BackendError('The mask matrix does not have the same shape as the images') weights *= self.config.maskmatrix intensity = self.apply_mask(image, self.config.xmask, self.config.ymask) weights = self.apply_mask(weights, self.config.xmask, self.config.ymask) dx = self.apply_mask(dx, self.config.xmask, self.config.ymask) dy = self.apply_mask(dy, self.config.xmask, self.config.ymask) dz = self.apply_mask(dz, self.config.xmask, self.config.ymask) #X,Y = numpy.meshgrid(x,y) #Z = numpy.ones(X.shape) * sdd pixels = dx,dy,dz return intensity, weights, (energy, UB, pixels, gamma, delta, omega, alpha, nu) class EH1(IO7Input): def parse_config(self, config): super(EH1, self).parse_config(config) self.config.sdd = float(config.pop('sdd'))#Sample to detector distance (mm) def process_image(self, scan, scanparams, pointparams, image): gamma, delta, omega, chi, phi, alpha, mon, transm = pointparams#GAM, DEL, OMG, CHI, PHI, ALF, MON, TRANSM energy, UB = 
scanparams weights = numpy.ones_like(image) util.status('{4}| gamma: {0}, delta: {1}, omega: {2}, mu: {3}'.format(gamma, delta, omega, alpha, time.ctime(time.time()))) # pixels to angles pixelsize = numpy.array(self.config.pixelsize) sdd = self.config.sdd nu = scan.metadata.diff1prot centralpixel = self.config.centralpixel # (column, row) = (delta, gamma) dz = (numpy.indices(image.shape)[1] - centralpixel[1]) * pixelsize[1] dx = (numpy.indices(image.shape)[0] - centralpixel[0]) * pixelsize[0] dy = numpy.ones(image.shape) * sdd # masking if self.config.maskmatrix is not None: if self.config.maskmatrix.shape != data.shape: raise errors.BackendError('The mask matrix does not have the same shape as the images') weights *= self.config.maskmatrix intensity = self.apply_mask(image, self.config.xmask, self.config.ymask) weights = self.apply_mask(weights, self.config.xmask, self.config.ymask) dx = self.apply_mask(dx, self.config.xmask, self.config.ymask) dy = self.apply_mask(dy, self.config.xmask, self.config.ymask) dz = self.apply_mask(dz, self.config.xmask, self.config.ymask) pixels = dx,dy,dz return intensity, weights, (energy, UB, pixels, gamma, delta, omega, alpha, nu) def load_matrix(filename): if filename == None: return None if os.path.exists(filename): ext = os.path.splitext(filename)[-1] if ext == '.txt': return numpy.array(numpy.loadtxt(filename), dtype = numpy.bool) elif ext == '.npy': return numpy.array(numpy.load(filename), dtype = numpy.bool) else: raise ValueError('unknown extension {0}, unable to load matrix!\n'.format(ext)) else: raise IOError('filename: {0} does not exist. Can not load matrix'.format(filename)) binoculars-0.0.4/binoculars/backends/sixs.py000066400000000000000000000576171343276063200211550ustar00rootroot00000000000000# -*- encoding: utf-8 -*- '''This file is part of the binoculars project. The BINoculars library is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. The BINoculars library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with the hkl library. If not, see . Copyright (C) 2015-2017 Synchrotron SOLEIL L'Orme des Merisiers Saint-Aubin BP 48 91192 GIF-sur-YVETTE CEDEX Copyright (C) 2012-2015 European Synchrotron Radiation Facility Grenoble, France Authors: Willem Onderwaater Picca Frédéric-Emmanuel ''' import numpy import math import os import tables import sys from collections import namedtuple from math import cos, sin from numpy.linalg import inv from pyFAI.detectors import ALL_DETECTORS from gi.repository import Hkl from .. import backend, errors, util from ..util import as_string # TODO # - Angles delta gamma. nom de 2 ou 3 moteurs. omega puis delta # gamma pour chaque pixels. # - aller cherche dans le fichier NeXuS le x0, y0 ainsi que le sdd. # - travailler en qx qy qz, il faut rajouter un paramètre optionnel # - qui permet de choisir une rotation azimuthal de Qx Qy. 
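# English translation of the French TODO notes above:
# TODO
# - Delta and gamma angles: name of 2 or 3 motors; omega first, then delta and
#   gamma for every pixel.
# - Fetch x0, y0 as well as the sdd (sample-detector distance) from the NeXus file.
# - Work in qx qy qz; an optional parameter needs to be added that allows an
#   azimuthal rotation of Qx Qy to be chosen.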
############### # Projections # ############### PDataFrame = namedtuple("PDataFrame", ["pixels", "k", "ub", "R", "P"]) class realspace(backend.ProjectionBase): # scalars: mu, theta, [chi, phi, "omitted"] delta, gamR, gamT, ty, # wavelength 3x3 matrix: UB def project(self, index, pdataframe): return (pdataframe.pixels[1], pdataframe.pixels[2]) def get_axis_labels(self): return 'x', 'y' class Pixels(backend.ProjectionBase): # scalars: mu, theta, [chi, phi, "omitted"] delta, gamR, gamT, ty, # wavelength 3x3 matrix: UB def project(self, index, pdataframe): return numpy.meshgrid(numpy.arange(pdataframe.pixels[0].shape[1]), numpy.arange(pdataframe.pixels[0].shape[0])) def get_axis_labels(self): return 'x', 'y' class HKLProjection(backend.ProjectionBase): # scalars: mu, theta, [chi, phi, "omitted"] delta, gamR, gamT, ty, # wavelength 3x3 matrix: UB def project(self, index, pdataframe): # put the detector at the right position pixels, k, UB, R, P = pdataframe ki = [1, 0, 0] RUB_1 = inv(numpy.dot(R, UB)) RUB_1P = numpy.dot(RUB_1, P) kf = normalized(pixels, axis=0) hkl_f = numpy.tensordot(RUB_1P, kf, axes=1) hkl_i = numpy.dot(RUB_1, ki) hkl = hkl_f - hkl_i[:, numpy.newaxis, numpy.newaxis] h, k, l = hkl * k return (h, k, l) def get_axis_labels(self): return 'H', 'K', 'L' class HKProjection(HKLProjection): def project(self, index, pdataframe): h, k, l = super(HKProjection, self).project(index, pdataframe) return h, k def get_axis_labels(self): return 'H', 'K' class QxQyQzProjection(backend.ProjectionBase): def project(self, index, pdataframe): # put the detector at the right position pixels, k, _, R, P = pdataframe # TODO factorize with HklProjection. Here a trick in order to # compute Qx Qy Qz in the omega basis. UB = numpy.array([[1, 0, 0], [0, 0, 1], [0, -1, 0]]) if self.config.mu_offset is not None: UB = numpy.dot(UB, M(self.config.mu_offset, [0, 1, 0])) if self.config.omega_offset is not None: UB = numpy.dot(UB, M(self.config.omega_offset, [0, 0, -1])) # UB = numpy.array([[1, 0, 0], # [0, 1, 0], # [0, 0, 1]]) # the ki vector should be in the NexusFile or easily extracted # from the hkl library. 
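        # For now ki is taken as the unit incident-beam direction, which is fixed along x
        # in this geometry (see get_ki below); only its direction matters here, since the
        # wavenumber k is applied separately when the hkl values are scaled.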
ki = [1, 0, 0] RUB_1 = inv(numpy.dot(R, UB)) RUB_1P = numpy.dot(RUB_1, P) kf = normalized(pixels, axis=0) hkl_f = numpy.tensordot(RUB_1P, kf, axes=1) hkl_i = numpy.dot(RUB_1, ki) hkl = hkl_f - hkl_i[:, numpy.newaxis, numpy.newaxis] qx, qy, qz = hkl * k return qx, qy, qz def get_axis_labels(self): return "Qx", "Qy", "Qz" def parse_config(self, config): super(QxQyQzProjection, self).parse_config(config) # omega offset for the sample in degree then convert into radian omega_offset = config.pop('omega_offset', None) if omega_offset is not None: self.config.omega_offset = math.radians(float(omega_offset)) else: self.config.omega_offset = None # omega offset for the sample in degree then convert into radian mu_offset = config.pop('mu_offset', None) if mu_offset is not None: self.config.mu_offset = math.radians(float(mu_offset)) else: self.config.mu_offset = None class QparQperProjection(QxQyQzProjection): def project(self, index, pdataframe): qx, qy, qz = super(QparQperProjection, self).project(index, pdataframe) return numpy.sqrt(qx*qx + qy*qy), qz def get_axis_labels(self): return 'Qpar', 'Qper' class Stereo(QxQyQzProjection): def project(self, index, pdataframe): qx, qy, qz = super(Stereo, self).project(index, pdataframe) q = numpy.sqrt(qx*qx+qy*qy+qz*qz) # ratio = qz + q # x = qx / ratio # y = qy / ratio return q, qx, qy def get_axis_labels(self): return "Q", "Qx", "Qy" class QIndex(Stereo): def project(self, index, pdataframe): q, qx, qy = super(QIndex, self).project(index, pdataframe) return q, numpy.ones_like(q) * index def get_axis_labels(self): return "Q", "Index" ################### # Common methodes # ################### WRONG_ATTENUATION = -100 def get_nxclass(hfile, nxclass, path="/"): """ :param hfile: the hdf5 file. :type hfile: tables.file. 
:param nxclass: the nxclass to extract :type nxclass: str """ for node in hfile.walk_nodes(path): try: if nxclass == as_string(node._v_attrs['NX_class']): return node except KeyError: pass return None def node_as_string(node): if node.shape == (): content = node.read().tostring() else: content = node[0] return as_string(content) Diffractometer = namedtuple('Diffractometer', ['name', # name of the hkl diffractometer 'ub', # the UB matrix 'geometry']) # the HklGeometry def get_diffractometer(hfile): """ Construct a Diffractometer from a NeXus file """ node = get_nxclass(hfile, 'NXdiffractometer') name = node_as_string(node.type) # remove the last "\n" char name = name[:-1] ub = node.UB[:] factory = Hkl.factories()[name] geometry = factory.create_new_geometry() # wavelength = get_nxclass(hfile, 'NXmonochromator').wavelength[0] # geometry.wavelength_set(wavelength) return Diffractometer(name, ub, geometry) Sample = namedtuple("Sample", ["a", "b", "c", "alpha", "beta", "gamma", "ux", "uy", "uz", "sample"]) def get_sample(hfile): # hkl sample a = b = c = 1.54 alpha = beta = gamma = 90 ux = uy = uz = 0 sample = Hkl.Sample.new("test") lattice = Hkl.Lattice.new(a, b, c, math.radians(alpha), math.radians(beta), math.radians(gamma)) sample.lattice_set(lattice) parameter = sample.ux_get() parameter.value_set(ux, Hkl.UnitEnum.USER) sample.ux_set(parameter) parameter = sample.uy_get() parameter.value_set(uy, Hkl.UnitEnum.USER) sample.uy_set(parameter) parameter = sample.uz_get() parameter.value_set(uz, Hkl.UnitEnum.USER) sample.uz_set(parameter) return Sample(1.54, 1.54, 1.54, 90, 90, 90, 0, 0, 0, sample) Detector = namedtuple("Detector", ["name", "detector"]) def get_detector(hfile): detector = Hkl.Detector.factory_new(Hkl.DetectorType(0)) return Detector("imxpads140", detector) Source = namedtuple("Source", ["wavelength"]) def get_source(hfile): wavelength = get_nxclass(hfile, 'NXmonochromator').wavelength[0] return Source(wavelength) DataFrame = namedtuple("DataFrame", ["diffractometer", "sample", "detector", "source", "h5_nodes"]) def dataframes(hfile, data_path=None): diffractometer = get_diffractometer(hfile) sample = get_sample(hfile) detector = get_detector(hfile) source = get_source(hfile) for group in hfile.get_node('/'): scan_data = group._f_get_child("scan_data") # now instantiate the pytables objects h5_nodes = {} for key, hitem in data_path.items(): try: child = scan_data._f_get_child(hitem.name) except tables.exceptions.NoSuchNodeError: if hitem.optional: child = None else: raise h5_nodes[key] = child yield DataFrame(diffractometer, sample, detector, source, h5_nodes) def get_ki(wavelength): """ for now the direction is always along x """ TAU = 2 * math.pi return numpy.array([TAU / wavelength, 0, 0]) def normalized(a, axis=-1, order=2): l2 = numpy.atleast_1d(numpy.linalg.norm(a, order, axis)) l2[l2 == 0] = 1 return a / numpy.expand_dims(l2, axis) def hkl_matrix_to_numpy(m): M = numpy.empty((3, 3)) for i in range(3): for j in range(3): M[i, j] = m.get(i, j) return M def M(theta, u): """ :param theta: the axis value in radian :type theta: float :param u: the axis vector [x, y, z] :type u: [float, float, float] :return: the rotation matrix :rtype: numpy.ndarray (3, 3) """ c = cos(theta) one_minus_c = 1 - c s = sin(theta) return numpy.array([[c + u[0]**2 * one_minus_c, u[0] * u[1] * one_minus_c - u[2] * s, u[0] * u[2] * one_minus_c + u[1] * s], [u[0] * u[1] * one_minus_c + u[2] * s, c + u[1]**2 * one_minus_c, u[1] * u[2] * one_minus_c - u[0] * s], [u[0] * u[2] * one_minus_c - u[1] * s, u[1] * 
u[2] * one_minus_c + u[0] * s, c + u[2]**2 * one_minus_c]]) ################## # Input Backends # ################## class SIXS(backend.InputBase): # OFFICIAL API dbg_scanno = None dbg_pointno = None def generate_jobs(self, command): scans = util.parse_multi_range(','.join(command).replace(' ', ',')) if not len(scans): sys.stderr.write('error: no scans selected, nothing to do\n') for scanno in scans: util.status('processing scan {0}...'.format(scanno)) if self.config.pr: pointcount = self.config.pr[1] - self.config.pr[0] + 1 start = self.config.pr[0] else: start = 0 pointcount = self.get_pointcount(scanno) if pointcount > self.config.target_weight * 1.4: for s in util.chunk_slicer(pointcount, self.config.target_weight): yield backend.Job(scan=scanno, firstpoint=start+s.start, lastpoint=start+s.stop-1, weight=s.stop-s.start) else: yield backend.Job(scan=scanno, firstpoint=start, lastpoint=start+pointcount-1, weight=pointcount) def process_job(self, job): super(SIXS, self).process_job(job) with tables.open_file(self.get_filename(job.scan), 'r') as scan: self.metadict = dict() try: for dataframe in dataframes(scan, self.HPATH): pixels = self.get_pixels(dataframe.detector) detector = ALL_DETECTORS[dataframe.detector.name]() mask = detector.mask.astype(numpy.bool) maskmatrix = load_matrix(self.config.maskmatrix) if maskmatrix is not None: mask = numpy.bitwise_or(mask, maskmatrix) for index in range(job.firstpoint, job.lastpoint + 1): yield self.process_image(index, dataframe, pixels, mask) # noqa util.statuseol() except Exception as exc: exc.args = errors.addmessage(exc.args, ', An error occured for scan {0} at point {1}. See above for more information'.format(self.dbg_scanno, self.dbg_pointno)) # noqa raise self.metadata.add_section('sixs_backend', self.metadict) def parse_config(self, config): super(SIXS, self).parse_config(config) # Optional, select a subset of the image range in the x # direction. all by default self.config.xmask = util.parse_multi_range(config.pop('xmask', None)) # Optional, select a subset of the image range in the y # direction. 
all by default self.config.ymask = util.parse_multi_range(config.pop('ymask', None)) # location of the nexus files (take precedence on nexusfile) self.config.nexusdir = config.pop('nexusdir', None) # Location of the specfile self.config.nexusfile = config.pop('nexusfile', None) # Optional, all range by default self.config.pr = config.pop('pr', None) if self.config.xmask is None: self.config.xmask = slice(None) if self.config.ymask is None: self.config.ymask = slice(None) if self.config.pr: self.config.pr = util.parse_tuple(self.config.pr, length=2, type=int) # noqa # sample to detector distance (mm) self.config.sdd = float(config.pop('sdd')) # x,y coordinates of the central pixel self.config.centralpixel = util.parse_tuple(config.pop('centralpixel'), length=2, type=int) # noqa # Optional, if supplied pixels where the mask is 0 will be removed self.config.maskmatrix = config.pop('maskmatrix', None) # detector rotation around x (1, 0, 0) self.config.detrot = config.pop('detrot', None) if self.config.detrot is not None: try: self.config.detrot = float(self.config.detrot) except ValueError: self.config.detrot = None # attenuation_coefficient (Optional) attenuation_coefficient = config.pop('attenuation_coefficient', None) if attenuation_coefficient is not None: try: self.config.attenuation_coefficient = float(attenuation_coefficient) # noqa except ValueError: self.config.attenuation_coefficient = None else: self.config.attenuation_coefficient = None def get_destination_options(self, command): if not command: return False command = ','.join(command).replace(' ', ',') scans = util.parse_multi_range(command) return dict(first=min(scans), last=max(scans), range=','.join(str(scan) for scan in scans)) # noqa # CONVENIENCE FUNCTIONS def get_filename(self, scanno): filename = None if self.config.nexusdir: dirname = self.config.nexusdir files = [f for f in os.listdir(dirname) if ((str(scanno).zfill(5) in f) and (os.path.splitext(f)[1] in ['.hdf5', '.nxs'])) ] if files is not []: filename = os.path.join(dirname, files[0]) else: filename = self.config.nexusfile.format(scanno=str(scanno).zfill(5)) # noqa if not os.path.exists(filename): raise errors.ConfigError('nexus filename does not exist: {0}'.format(filename)) # noqa return filename @staticmethod def apply_mask(data, xmask, ymask): roi = data[ymask, :] return roi[:, xmask] HItem = namedtuple("HItem", ["name", "optional"]) class FlyScanUHV(SIXS): HPATH = { "image": HItem("xpad_image", False), "mu": HItem("UHV_MU", False), "omega": HItem("UHV_OMEGA", False), "delta": HItem("UHV_DELTA", False), "gamma": HItem("UHV_GAMMA", False), "attenuation": HItem("attenuation", True), } def get_pointcount(self, scanno): # just open the file in order to extract the number of step with tables.open_file(self.get_filename(scanno), 'r') as scan: return get_nxclass(scan, "NXdata").xpad_image.shape[0] def get_attenuation(self, index, h5_nodes, offset): attenuation = None if self.config.attenuation_coefficient is not None: try: node = h5_nodes['attenuation'] if node is not None: attenuation = node[index + offset] else: raise Exception("you asked for attenuation but the file does not contain attenuation informations.") # noqa except IndexError: attenuation = WRONG_ATTENUATION return attenuation def get_values(self, index, h5_nodes): image = h5_nodes['image'][index] mu = h5_nodes['mu'][index] omega = h5_nodes['omega'][index] delta = h5_nodes['delta'][index] gamma = h5_nodes['gamma'][index] attenuation = self.get_attenuation(index, h5_nodes, 2) return (image, attenuation, (mu, 
omega, delta, gamma)) def process_image(self, index, dataframe, pixels, mask): util.status(str(index)) # extract the data from the h5 nodes h5_nodes = dataframe.h5_nodes intensity, attenuation, values = self.get_values(index, h5_nodes) # BEWARE in order to avoid precision problem we convert the # uint16 -> float32. (the size of the mantis is on 23 bits) # enought to contain the uint16. If one day we use uint32, it # should be necessary to convert into float64. intensity = intensity.astype('float32') weights = None if self.config.attenuation_coefficient is not None: if attenuation != WRONG_ATTENUATION: intensity *= self.config.attenuation_coefficient ** attenuation weights = numpy.ones_like(intensity) weights *= ~mask else: weights = numpy.zeros_like(intensity) else: weights = numpy.ones_like(intensity) weights *= ~mask k = 2 * math.pi / dataframe.source.wavelength hkl_geometry = dataframe.diffractometer.geometry hkl_geometry.axis_values_set(values, Hkl.UnitEnum.USER) # sample hkl_sample = dataframe.sample.sample q_sample = hkl_geometry.sample_rotation_get(hkl_sample) R = hkl_matrix_to_numpy(q_sample.to_matrix()) # detector hkl_detector = dataframe.detector.detector q_detector = hkl_geometry.detector_rotation_get(hkl_detector) P = hkl_matrix_to_numpy(q_detector.to_matrix()) if self.config.detrot is not None: P = numpy.dot(P, M(math.radians(self.config.detrot), [1, 0, 0])) pdataframe = PDataFrame(pixels, k, dataframe.diffractometer.ub, R, P) return intensity, weights, (index, pdataframe) def get_pixels(self, detector): # works only for flat detector. detector = ALL_DETECTORS[detector.name]() y, x, _ = detector.calc_cartesian_positions() y0 = y[self.config.centralpixel[1], self.config.centralpixel[0]] x0 = x[self.config.centralpixel[1], self.config.centralpixel[0]] z = numpy.ones(x.shape) * -1 * self.config.sdd # return converted to the hkl library coordinates # x -> -y # y -> z # z -> -x return numpy.array([-z, -(x - x0), (y - y0)]) class FlyScanUHV2(FlyScanUHV): HPATH = { "image": HItem("xpad_image", False), "mu": HItem("mu", False), "omega": HItem("omega", False), "delta": HItem("delta", False), "gamma": HItem("gamma", False), "attenuation": HItem("attenuation", True), } class FlyMedH(FlyScanUHV): HPATH = { "image": HItem("xpad_image", False), "pitch": HItem("beta", True), "mu": HItem("mu", False), "gamma": HItem("gamma", False), "delta": HItem("delta", False), "attenuation": HItem("attenuation", True), } def get_values(self, index, h5_nodes): image = h5_nodes['image'][index] pitch = h5_nodes['pitch'][index] if h5_nodes['pitch'] else 0.3 mu = h5_nodes['mu'][index] gamma = h5_nodes['gamma'][index] delta = h5_nodes['delta'][index] attenuation = self.get_attenuation(index, h5_nodes, 2) return (image, attenuation, (pitch, mu, gamma, delta)) class SBSMedH(FlyScanUHV): HPATH = { "image": HItem("data_03", False), "pitch": HItem("data_22", False), "mu": HItem("data_18", False), "gamma": HItem("data_20", False), "delta": HItem("data_19", False), "attenuation": HItem("data_xx", True), } def get_pointcount(self, scanno): # just open the file in order to extract the number of step with tables.open_file(self.get_filename(scanno), 'r') as scan: return get_nxclass(scan, "NXdata").data_03.shape[0] def get_values(self, index, h5_nodes): image = h5_nodes['image'][index] pitch = h5_nodes['pitch'][index] mu = h5_nodes['mu'][index] gamma = h5_nodes['gamma'][index] delta = h5_nodes['delta'][index] attenuation = self.get_attenuation(index, h5_nodes, 2) return (image, attenuation, (pitch, mu, gamma, delta)) class 
FlyMedV(FlyScanUHV): HPATH = { "image": HItem("xpad_image", False), "beta": HItem("beta", True), "mu": HItem("mu", False), "omega": HItem("omega", False), "gamma": HItem("gamma", False), "delta": HItem("delta", False), "etaa": HItem("etaa", True), "attenuation": HItem("attenuation", True), } def get_values(self, index, h5_nodes): image = h5_nodes['image'][index] beta = h5_nodes['beta'][index] if h5_nodes['beta'] else 0.0 mu = h5_nodes['mu'][index] omega = h5_nodes['omega'][index] gamma = h5_nodes['gamma'][index] delta = h5_nodes['delta'][index] etaa = h5_nodes['etaa'][index] if h5_nodes['etaa'] else 0.0 attenuation = self.get_attenuation(index, h5_nodes, 2) return (image, attenuation, (beta, mu, omega, gamma, delta, etaa)) def load_matrix(filename): if filename is None: return None if os.path.exists(filename): ext = os.path.splitext(filename)[-1] if ext == '.txt': return numpy.array(numpy.loadtxt(filename), dtype=numpy.bool) elif ext == '.npy': mask = numpy.array(numpy.load(filename), dtype=numpy.bool) print("loaded mask sum: ", numpy.sum(mask)) return mask else: raise ValueError('unknown extension {0}, unable to load matrix!\n'.format(ext)) # noqa else: raise IOError('filename: {0} does not exist. Can not load matrix'.format(filename)) # noqa binoculars-0.0.4/binoculars/dispatcher.py000077500000000000000000000311241343276063200205270ustar00rootroot00000000000000import sys import os import time import itertools import subprocess import multiprocessing from . import util, errors, space #python3 support PY3 = sys.version_info > (3,) class Destination(object): type = filename = overwrite = value = config = limits = None opts = {} def set_final_filename(self, filename, overwrite): self.type = 'final' self.filename = filename self.overwrite = overwrite def set_final_options(self, opts): if opts is not False: self.opts = opts def set_limits(self, limits): self.limits = limits def set_config(self, conf): self.config = conf def set_tmp_filename(self, filename): self.type = 'tmp' self.filename = filename def set_memory(self): self.type = 'memory' def store(self, verse): self.value = None if verse.dimension == 0: raise ValueError('Empty output, Multiverse contains no spaces') if self.type == 'memory': self.value = verse elif self.type == 'tmp': verse.tofile(self.filename) elif self.type == 'final': for sp, fn in zip(verse.spaces, self.final_filenames()): sp.config = self.config sp.tofile(fn) def retrieve(self): if self.type == 'memory': return self.value def final_filenames(self): fns = [] if not self.limits == None: base, ext = os.path.splitext(self.filename) for limlabel in util.limit_to_filelabel(self.limits): fn = (base + '_' + limlabel + ext).format(**self.opts) if not self.overwrite: fn = util.find_unused_filename(fn) fns.append(fn) else: fn = self.filename.format(**self.opts) if not self.overwrite: fn = util.find_unused_filename(fn) fns.append(fn) return fns class DispatcherBase(util.ConfigurableObject): def __init__(self, config, main): self.main = main super(DispatcherBase, self).__init__(config) def parse_config(self, config): super(DispatcherBase, self).parse_config(config) self.config.destination = Destination() destination = config.pop('destination', 'output.hdf5') # optional 'output.hdf5' by default overwrite = util.parse_bool(config.pop('overwrite', 'false')) #by default: numbered files in the form output_ # .hdf5: self.config.destination.set_final_filename(destination, overwrite) # explicitly parsing the options first helps with the debugging self.config.host = config.pop('host', 
None) # ip adress of the running gui awaiting the spaces self.config.port = config.pop('port', None) # port of the running gui awaiting the spaces self.config.send_to_gui = util.parse_bool(config.pop('send_to_gui', 'false')) # previewing the data, if true, also specify host and port def send(self, verses): # provides the possiblity to send the results to the gui over the network if self.config.send_to_gui or (self.config.host is not None and self.config.host is not None): # only continue of ip is specified and send_to_server is flagged for M in verses: if self.config.destination.limits is None: sp = M.spaces[0] if isinstance(sp, space.Space): util.socket_send(self.config.host, int(self.config.port), util.serialize(sp, ','.join(self.main.config.command))) else: for sp, label in zip(M.spaces, util.limit_to_filelabel(self.config.destination.limits)): if isinstance(sp, space.Space): util.socket_send(self.config.host, int(self.config.port), util.serialize(sp, '{0}_{1}'.format(','.join(self.main.config.command), label))) yield M else: for M in verses: yield M def has_specific_task(self): return False def process_jobs(self, jobs): raise NotImplementedError def sum(self, results): raise NotImplementedError # The simplest possible dispatcher. Does the work all by itself on a single # thread/core/node. 'Local' will most likely suit your needs better. class SingleCore(DispatcherBase): def process_jobs(self, jobs): for job in jobs: yield self.main.process_job(job) def sum(self, results): return space.chunked_sum(self.send(results)) # Base class for Dispatchers using subprocesses to do some work. class ReentrantBase(DispatcherBase): actions = 'user', def parse_config(self, config): super(ReentrantBase, self).parse_config(config) self.config.action = config.pop('action', 'user').lower() if self.config.action not in self.actions: raise errors.ConfigError('action {0} not recognized for {1}'.format(self.config.action, self.__class__.__name__)) def has_specific_task(self): if self.config.action == 'user': return False else: return True def run_specific_task(self, command): raise NotImplementedError # Dispatch multiple worker processes locally, while doing the summation in the main process class Local(ReentrantBase): ### OFFICIAL API actions = 'user', 'job' def parse_config(self, config): super(Local, self).parse_config(config) self.config.ncores = int(config.pop('ncores', 0)) # optionally, specify number of cores (autodetect by default) if self.config.ncores <= 0: self.config.ncores = multiprocessing.cpu_count() def process_jobs(self, jobs): if self.config.ncores == 1 and not PY3: # note: SingleCore will be marginally faster map = itertools.imap else: pool = multiprocessing.Pool(self.config.ncores) map = pool.imap_unordered configs = (self.prepare_config(job) for job in jobs) for result in map(self.main.get_reentrant(), configs): yield result def sum(self, results): return space.chunked_sum(self.send(results)) def run_specific_task(self, command): if command: raise errors.SubprocessError("invalid command, too many parameters: '{0}'".format(command)) if self.config.action == 'job': result = self.main.process_job(self.config.job) self.config.destination.store(result) ### UTILITY def prepare_config(self, job): config = self.main.clone_config() config.dispatcher.destination.set_memory() config.dispatcher.action = 'job' config.dispatcher.job = job return config, () # Dispatch many worker processes on an Oar cluster. 
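# Editor's illustration (not part of the original source): the Oar dispatcher
# defined below drives the OAR batch scheduler by shelling out to its command
# line tools and parsing their stdout (see subprocess_run/oarsub/oarstat).
# This is a minimal, self-contained sketch of that pattern using only the
# standard library; the helper name and the example command in the docstring
# are assumptions for demonstration and require an OAR installation to do
# anything useful.
import subprocess


def run_and_grep(command, prefix='OAR_JOB_ID='):
    """Run `command` (a list of strings) and return the text following
    `prefix` in its output, or None if the command fails or the prefix
    is not found.

    Usage sketch: run_and_grep(['oarsub', '-l walltime=0:15', 'echo hello'])
    """
    process = subprocess.Popen(command, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    output, _ = process.communicate()
    if process.poll() != 0:
        return None
    for line in output.decode(errors='replace').split('\n'):
        if line.startswith(prefix):
            return line.split('=', 1)[1].strip()
    return None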
class Oar(ReentrantBase): ### OFFICIAL API actions = 'user', 'process' def parse_config(self, config): super(Oar, self).parse_config(config) self.config.tmpdir = config.pop('tmpdir', os.getcwd()) # Optional, current directory by default self.config.oarsub_options = config.pop('oarsub_options', 'walltime=0:15') # optionally, tweak oarsub parameters self.config.executable = config.pop('executable', ' '.join(util.get_python_executable())) # optionally, override default location of python and/or BINoculars installation def process_jobs(self, jobs): self.configfiles = [] self.intermediates = [] clusters = util.cluster_jobs2(jobs, self.main.input.config.target_weight) for jobscluster in clusters: uniq = util.uniqid() jobconfig = os.path.join(self.config.tmpdir, 'binoculars-{0}-jobcfg.zpi'.format(uniq)) self.configfiles.append(jobconfig) config = self.main.clone_config() interm = os.path.join(self.config.tmpdir, 'binoculars-{0}-jobout.hdf5'.format(uniq)) self.intermediates.append(interm) config.dispatcher.destination.set_tmp_filename(interm) config.dispatcher.sum = () config.dispatcher.action = 'process' config.dispatcher.jobs = jobscluster util.zpi_save(config, jobconfig) yield self.oarsub(jobconfig) #if all jobs are sent to the cluster send the process that sums all other jobs uniq = util.uniqid() jobconfig = os.path.join(self.config.tmpdir, 'binoculars-{0}-jobcfg.zpi'.format(uniq)) self.configfiles.append(jobconfig) config = self.main.clone_config() config.dispatcher.sum = self.intermediates config.dispatcher.action = 'process' config.dispatcher.jobs = () util.zpi_save(config, jobconfig) yield self.oarsub(jobconfig) def sum(self, results): jobs = list(results) jobscopy = jobs[:] self.oarwait(jobs) self.oar_cleanup(jobscopy) return True def run_specific_task(self, command): if self.config.action != 'process' or (not self.config.jobs and not self.config.sum) or command: raise errors.SubprocessError("invalid command, too many parameters or no jobs/sum given") jobs = sum = space.EmptyVerse() if self.config.jobs: jobs = space.verse_sum(self.send(self.main.process_job(job) for job in self.config.jobs)) if self.config.sum: sum = space.chunked_sum(space.Multiverse.fromfile(src) for src in util.yield_when_exists(self.config.sum)) self.config.destination.store(jobs + sum) ### calling OAR @staticmethod def subprocess_run(*command): process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output, unused_err = process.communicate() retcode = process.poll() return retcode, output def oarsub(self, *args): command = '{0} process {1}'.format(self.config.executable, ' '.join(args)) ret, output = self.subprocess_run('oarsub', '-l {0}'.format(self.config.oarsub_options), command) if ret == 0: lines = output.split('\n') for line in lines: if line.startswith('OAR_JOB_ID='): void, jobid = line.split('=') util.status('{0}: Launched job {1}'.format(time.ctime(), jobid)) return jobid.strip() return False def oarstat(self, jobid): # % oarstat -s -j 5651374 # 5651374: Running # % oarstat -s -j 5651374 # 5651374: Finishing ret, output = self.subprocess_run('oarstat', '-s', '-j', str(jobid)) if ret == 0: for n in output.split('\n'): if n.startswith(str(jobid)): job, status = n.split(':') return status.strip() else: return 'Unknown' def oarwait(self, jobs, remaining=0): linelen = 0 if len(jobs) > remaining: util.status('{0}: getting status of {1} jobs...'.format(time.ctime(), len(jobs))) else: return delay = util.loop_delayer(30) while len(jobs) > remaining: next(delay) i = 0 R = 0 W = 0 U = 0 
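            # (editor's note) The loop below polls oarstat for every outstanding
            # job id: it tallies jobs that are Running (R), queued (W) or in an
            # unknown state (U), and removes finished jobs from `jobs` in place,
            # which is why the index `i` is adjusted manually instead of using a
            # plain for-loop.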
while i < len(jobs): state = self.oarstat(jobs[i]) if state == 'Running': R += 1 elif state in ('Waiting', 'toLaunch', 'Launching'): W += 1 elif state == 'Unknown': U += 1 else: # assume state == 'Finishing' or 'Terminated' but don't wait on something unknown del jobs[i] i -= 1 # otherwise it skips a job i += 1 util.status('{0}: {1} jobs to go. {2} waiting, {3} running, {4} unknown.'.format(time.ctime(), len(jobs), W, R, U)) util.statuseol() def oar_cleanup(self, jobs): # cleanup: for f in itertools.chain(self.configfiles, self.intermediates): try: os.remove(f) except Exception as e: print("unable to remove {0}: {1}".format(f, e)) errorfn = [] for jobid in jobs: errorfilename = 'OAR.{0}.stderr'.format(jobid) if os.path.exists(errorfilename): with open(errorfilename, 'r') as fp: errormsg = fp.read() if len(errormsg) > 0: errorfn.append(errorfilename) print('Critical error: OAR Job {0} failed with the following error: \n{1}'.format(jobid, errormsg)) if len(errorfn) > 0: print('Warning! {0} job(s) failed. See above for the details or the error log files: {1}'.format(len(errorfn), ', '.join(errorfn))) binoculars-0.0.4/binoculars/errors.py000066400000000000000000000010451343276063200177110ustar00rootroot00000000000000# TODO: present exceptions based on errors.ExceptionBase in a gentle way to the user class ExceptionBase(Exception): pass class ConfigError(ExceptionBase): pass class FileError(ExceptionBase): pass class HDF5FileError(FileError): pass class SubprocessError(ExceptionBase): pass class BackendError(ExceptionBase): pass class CommunicationError(ExceptionBase): pass def addmessage(args, errormsg): if not args: arg0 = '' else: arg0 = args[0] arg0 += errormsg return (arg0, ) binoculars-0.0.4/binoculars/fit.py000066400000000000000000000230531343276063200171620ustar00rootroot00000000000000import numpy import scipy.optimize import scipy.special import inspect import re class FitBase(object): parameters = None guess = None result = None summary = None fitdata = None def __init__(self, space, guess=None): self.space = space code = inspect.getsource(self.func) args = tuple( re.findall('\((.*?)\)', line)[0].split(',') for line in code.split('\n')[2:4]) if space.dimension != len(args[0]): raise ValueError('dimension mismatch: space has {0}, {1.__class__.__name__} expects {2}'.format(space.dimension, self, len(args[0]))) self.parameters = args[1] self.xdata, self.ydata, self.cxdata, self.cydata = self._prepare(self.space) if guess is not None: if len(guess) != len(self.parameters): raise ValueError('invalid number of guess parameters {0!r} for {1!r}'.format(guess, self.parameters)) self.guess = guess else: self._guess() self.success = self._fit() @staticmethod def _prepare(space): ydata = space.get_masked() cydata = ydata.compressed() imask = ~ydata.mask xdata = space.get_grid() cxdata = tuple(d[imask] for d in xdata) return xdata, ydata, cxdata, cydata def _guess(self): # the implementation should use space and/or self.xdata/self.ydata and/or the cxdata/cydata maskless versions to obtain guess raise NotImplementedError def _fitfunc(self, params): return self.cydata - self.func(self.cxdata, params) def _fit(self): result = scipy.optimize.leastsq(self._fitfunc, self.guess, full_output=True, epsfcn=0.000001) self.message = re.sub('\s{2,}', ' ', result[3].strip()) self.result = result[0] errdata = result[2]['fvec'] if result[1] is None: self.variance = numpy.zeros(len(self.result)) else: self.variance = numpy.diagonal(result[1] * (errdata**2).sum() / (len(errdata) - len(self.result))) self.fitdata = 
numpy.ma.array(self.func(self.xdata, self.result), mask=self.ydata.mask)
        self.summary = '\n'.join('%s: %.4g +/- %.4g' % (n, p, v) for (n, p, v) in zip(self.parameters, self.result, self.variance))
        return result[4] in (1, 2, 3, 4)  # corresponds to True on success, False on failure

    def __str__(self):
        return '{0.__class__.__name__} fit on {1}\n{2}\n{3}'.format(self, self.space, self.message, self.summary)


class PeakFitBase(FitBase):
    def __init__(self, space, guess=None, loc=None):
        if loc is not None:
            self.argmax = tuple(loc)
        else:
            self.argmax = None
        super(PeakFitBase, self).__init__(space, guess)

    def _guess(self):
        maximum = self.cydata.max()  # for background determination
        background = self.cydata < (numpy.median(self.cydata) + maximum) / 2
        if any(background == True):  # the fit will fail if background is False for all points
            linparams = self._linfit(list(grid[background] for grid in self.cxdata), self.cydata[background])
        else:
            linparams = numpy.zeros(len(self.cxdata) + 1)
        simbackground = linparams[-1] + numpy.sum(numpy.vstack([param * grid.flatten() for (param, grid) in zip(linparams[:-1], self.cxdata)]), axis=0)
        signal = self.cydata - simbackground
        if self.argmax is not None:
            argmax = self.argmax
        else:
            argmax = tuple((signal * grid).sum() / signal.sum() for grid in self.cxdata)
        argmax_bkg = linparams[-1] + numpy.sum(numpy.vstack([param * grid.flatten() for (param, grid) in zip(linparams[:-1], argmax)]))
        try:
            maximum = self.space[argmax] - argmax_bkg
        except ValueError:
            maximum = self.cydata.max()
        if numpy.isnan(maximum):
            maximum = self.cydata.max()
        self.set_guess(maximum, argmax, linparams)

    def _linfit(self, coordinates, intensity):
        coordinates = list(coordinates)
        coordinates.append(numpy.ones_like(coordinates[0]))
        matrix = numpy.vstack([coords.flatten() for coords in coordinates]).T
        return numpy.linalg.lstsq(matrix, intensity, rcond=None)[0]


class AutoDimensionFit(FitBase):
    def __new__(cls, space, guess=None):
        if space.dimension in cls.dimensions:
            return cls.dimensions[space.dimension](space, guess)
        else:
            raise TypeError('{0}-dimensional space not supported for {1.__name__}'.format(space.dimension, cls))


# utility functions
def rot2d(x, y, th):
    xrot = x * numpy.cos(th) + y * numpy.sin(th)
    yrot = - x * numpy.sin(th) + y * numpy.cos(th)
    return xrot, yrot


def rot3d(x, y, z, th, ph):
    xrot = numpy.cos(th) * x + numpy.sin(th) * numpy.sin(ph) * y + numpy.sin(th) * numpy.cos(ph) * z
    yrot = numpy.cos(ph) * y - numpy.sin(ph) * z
    zrot = -numpy.sin(th) * x + numpy.cos(th) * numpy.sin(ph) * y + numpy.cos(th) * numpy.cos(ph) * z
    return xrot, yrot, zrot


def get_class_by_name(name):
    options = {}
    for k, v in globals().items():
        if isinstance(v, type) and issubclass(v, FitBase):
            options[k.lower()] = v
    if name.lower() in options:
        return options[name.lower()]
    else:
        raise ValueError("unsupported fit function '{0}'".format(name))


# fitting functions
class Lorentzian1D(PeakFitBase):
    @staticmethod
    def func(grid, params):
        (x, ) = grid
        (I, loc, gamma, slope, offset) = params
        return I / ((x - loc)**2 + gamma**2) + offset + x * slope

    def set_guess(self, maximum, argmax, linparams):
        gamma0 = 5 * self.space.axes[0].res  # estimated FWHM on 10 pixels
        self.guess = [maximum, argmax[0], gamma0, linparams[0], linparams[1]]


class Lorentzian1DNoBkg(PeakFitBase):
    @staticmethod
    def func(grid, params):
        (x, ) = grid
        (I, loc, gamma) = params
        return I / ((x - loc)**2 + gamma**2)

    def set_guess(self, maximum, argmax, linparams):
        gamma0 = 5 * self.space.axes[0].res  # estimated FWHM on 10 pixels
        self.guess = [maximum, argmax[0], gamma0]


class 
PolarLorentzian2Dnobkg(PeakFitBase): @staticmethod def func(grid, params): (x, y) = grid (I, loc0, loc1, gamma0, gamma1, th) = params a, b = tuple(grid - center for grid, center in zip(rot2d(x, y, th), rot2d(loc0, loc1, th))) return (I / (1 + (a / gamma0)**2 + (b / gamma1)**2)) def set_guess(self, maximum, argmax, linparams): gamma0 = self.space.axes[0].res # estimated FWHM on 10 pixels gamma1 = self.space.axes[1].res self.guess = [maximum, argmax[0], argmax[1], gamma0, gamma1, 0] class PolarLorentzian2D(PeakFitBase): @staticmethod def func(grid, params): (x, y) = grid (I, loc0, loc1, gamma0, gamma1, th, slope1, slope2, offset) = params a, b = tuple(grid - center for grid, center in zip(rot2d(x, y, th), rot2d(loc0, loc1, th))) return (I / (1 + (a / gamma0)**2 + (b / gamma1)**2) + x * slope1 + y * slope2 + offset) def set_guess(self, maximum, argmax, linparams): gamma0 = self.space.axes[0].res # estimated FWHM on 10 pixels gamma1 = self.space.axes[1].res self.guess = [maximum, argmax[0], argmax[1], gamma0, gamma1, 0, linparams[0], linparams[1], linparams[2]] def integrate_signal(self): return self.func(self.cxdata, (self.result[0], self.result[1], self.result[2], self.result[3], self.result[4], self.result[5], 0, 0, 0)).sum() class Lorentzian2D(PeakFitBase): @staticmethod def func(grid, params): (x, y) = grid (I, loc0, loc1, gamma0, gamma1, th, slope1, slope2, offset) = params a, b = tuple(grid - center for grid, center in zip(rot2d(x, y, th), rot2d(loc0, loc1, th))) return (I / (1 + (a/gamma0)**2) * 1 / (1 + (b/gamma1)**2) + x * slope1 + y * slope2 + offset) def set_guess(self, maximum, argmax, linparams): gamma0 = 5 * self.space.axes[0].res # estimated FWHM on 10 pixels gamma1 = 5 * self.space.axes[1].res self.guess = [maximum, argmax[0], argmax[1], gamma0, gamma1, 0, linparams[0], linparams[1], linparams[2]] class Lorentzian2Dnobkg(PeakFitBase): @staticmethod def func(grid, params): (x, y) = grid (I, loc0, loc1, gamma0, gamma1, th) = params a, b = tuple(grid - center for grid, center in zip(rot2d(x, y, th), rot2d(loc0, loc1, th))) return (I / (1 + (a/gamma0)**2) * 1 / (1 + (b/gamma1)**2)) def set_guess(self, maximum, argmax, linparams): gamma0 = 5 * self.space.axes[0].res # estimated FWHM on 10 pixels gamma1 = 5 * self.space.axes[1].res self.guess = [maximum, argmax[0], argmax[1], gamma0, gamma1, 0] class Lorentzian(AutoDimensionFit): dimensions = {1: Lorentzian1D, 2: PolarLorentzian2D} class Gaussian1D(PeakFitBase): @staticmethod def func(grid, params): (x,) = grid (loc, I, sigma, offset, slope) = params return I * numpy.exp(-((x-loc)/sigma)**2/2) + offset + x * slope class Voigt1D(PeakFitBase): @staticmethod def func(grid, params): (x, ) = grid (I, loc, sigma, gamma, slope, offset) = params z = (x - loc + numpy.complex(0, gamma)) / (sigma * numpy.sqrt(2)) return I * numpy.real(scipy.special.wofz(z))/(sigma * numpy.sqrt(2 * numpy.pi)) + offset + x * slope def set_guess(self, maximum, argmax, linparams): gamma0 = 5 * self.space.axes[0].res # estimated FWHM on 10 pixels self.guess = [maximum, argmax[0], 0.01, gamma0, linparams[0], linparams[1]] binoculars-0.0.4/binoculars/main.py000077500000000000000000000144141343276063200173300ustar00rootroot00000000000000import os import sys import argparse from . 
import space, backend, util, errors def parse_args(args): parser = argparse.ArgumentParser(prog='binoculars process') parser.add_argument('-c', metavar='SECTION:OPTION=VALUE', action='append', type=parse_commandline_config_option, default=[], help='additional configuration option in the form section:option=value') parser.add_argument('configfile', help='configuration file') parser.add_argument('command', nargs='*', default=[]) return parser.parse_args(args) def parse_commandline_config_option(s): try: key, value = s.split('=', 1) section, option = key.split(':') except ValueError: raise argparse.ArgumentTypeError("configuration specification '{0}' not in the form section:option=value".format(s)) return section, option, value def multiprocessing_main(xxx_todo_changeme): # note the double parenthesis for map() convenience (config, command) = xxx_todo_changeme Main.from_object(config, command) return config.dispatcher.destination.retrieve() class Main(object): def __init__(self, config, command): if isinstance(config, util.ConfigSectionGroup): self.config = config.configfile.copy() elif isinstance(config, util.ConfigFile): self.config = config.copy() else: raise ValueError('Configfile is the wrong type') # distribute the configfile to space and to the metadata instance spaceconf = self.config.copy() #input from either the configfile or the configsectiongroup is valid self.dispatcher = backend.get_dispatcher(config.dispatcher, self, default='local') self.projection = backend.get_projection(config.projection) self.input = backend.get_input(config.input) self.dispatcher.config.destination.set_final_options(self.input.get_destination_options(command)) if 'limits' in self.config.projection: self.dispatcher.config.destination.set_limits(self.config.projection['limits']) if command: self.dispatcher.config.destination.set_config(spaceconf) self.run(command) @classmethod def from_args(cls, args): args = parse_args(args) if not os.path.exists(args.configfile): # wait up to 10 seconds if it is a zpi, it might take a while for the file to appear accross the network if not args.configfile.endswith('.zpi') or not util.wait_for_file(args.configfile, 10): raise errors.FileError("configuration file '{0}' does not exist".format(args.configfile)) configobj = False with open(args.configfile, 'rb') as fp: if fp.read(2) == '\x1f\x8b': # gzip marker fp.seek(0) configobj = util.zpi_load(fp) if not configobj: # reopen args.configfile as text configobj = util.ConfigFile.fromtxtfile(args.configfile, command=args.command, overrides=args.c) return cls(configobj, args.command) @classmethod def from_object(cls, config, command): config.command = command return cls(config, command) def run(self, command): if self.dispatcher.has_specific_task(): self.dispatcher.run_specific_task(command) else: jobs = self.input.generate_jobs(command) tokens = self.dispatcher.process_jobs(jobs) self.result = self.dispatcher.sum(tokens) if self.result is True: pass elif isinstance(self.result, space.EmptySpace): sys.stderr.write('error: output is an empty dataset\n') else: self.dispatcher.config.destination.store(self.result) def process_job(self, job): def generator(): res = self.projection.config.resolution labels = self.projection.get_axis_labels() for intensity, weights, params in self.input.process_job(job): coords = self.projection.project(*params) if self.projection.config.limits == None: yield space.Multiverse((space.Space.from_image(res, labels, coords, intensity, weights=weights), )) else: yield 
space.Multiverse(space.Space.from_image(res, labels, coords, intensity, weights=weights, limits=limits) for limits in self.projection.config.limits) jobverse = space.chunked_sum(generator(), chunksize=25) for sp in jobverse.spaces: if isinstance(sp, space.Space): sp.metadata.add_dataset(self.input.metadata) return jobverse def clone_config(self): config = util.ConfigSectionGroup() config.configfile = self.config config.dispatcher = self.dispatcher.config.copy() config.projection = self.projection.config.copy() config.input = self.input.config.copy() return config def get_reentrant(self): return multiprocessing_main class Split(Main): # completely ignores the dispatcher, just yields a space per image def __init__(self, config, command): self.command = command if isinstance(config, util.ConfigSectionGroup): self.config = config.configfile.copy() elif isinstance(config, util.ConfigFile): self.config = config.copy() else: raise ValueError('Configfile is the wrong type') #input from either the configfile or the configsectiongroup is valid self.projection = backend.get_projection(config.projection) self.input = backend.get_input(config.input) def process_job(self, job): res = self.projection.config.resolution labels = self.projection.get_axis_labels() for intensity, weights, params in self.input.process_job(job): coords = self.projection.project(*params) if self.projection.config.limits == None: yield space.Space.from_image(res, labels, coords, intensity, weights=weights) else: yield space.Multiverse(space.Space.from_image(res, labels, coords, intensity, weights=weights, limits=limits) for limits in self.projection.config.limits) def run(self): for job in self.input.generate_jobs(self.command): for verse in self.process_job(job): yield verse binoculars-0.0.4/binoculars/plot.py000066400000000000000000000170751343276063200173650ustar00rootroot00000000000000import numpy import matplotlib.colors import matplotlib.cm import mpl_toolkits.mplot3d # Adapted from http://www.ster.kuleuven.be/~pieterd/python/html/plotting/interactive_colorbar.html # which in turn is based on an example from http://matplotlib.org/users/event_handling.html class DraggableColorbar(object): def __init__(self, cbar, mappable): self.cbar = cbar self.mappable = mappable self.press = None self.cycle = sorted([i for i in dir(matplotlib.cm) if hasattr(getattr(matplotlib.cm, i), 'N')]) self.index = self.cycle.index(cbar.get_cmap().name) self.canvas = self.cbar.patch.figure.canvas def connect(self): self.cidpress = self.canvas.mpl_connect('button_press_event', self.on_press) self.cidrelease = self.canvas.mpl_connect('button_release_event', self.on_release) self.cidmotion = self.canvas.mpl_connect('motion_notify_event', self.on_motion) self.cidkeypress = self.canvas.mpl_connect('key_press_event', self.key_press) def disconnect(self): self.canvas.mpl_disconnect(self.cidpress) self.canvas.mpl_disconnect(self.cidrelease) self.canvas.mpl_disconnect(self.cidmotion) self.canvas.mpl_disconnect(self.cidkeypress) def on_press(self, event): if event.inaxes == self.cbar.ax: self.press = event.x, event.y def key_press(self, event): if event.key == 'down': self.index += 1 elif event.key == 'up': self.index -= 1 if self.index < 0: self.index = len(self.cycle) elif self.index >= len(self.cycle): self.index = 0 cmap = self.cycle[self.index] self.mappable.set_cmap(cmap) self.cbar.patch.figure.canvas.draw() def on_motion(self, event): if self.press is None or event.inaxes != self.cbar.ax: return xprev, yprev = self.press dx = event.x - xprev dy = 
event.y - yprev self.press = event.x, event.y if isinstance(self.cbar.norm, matplotlib.colors.LogNorm): scale = 0.999 * numpy.log10(self.cbar.norm.vmax / self.cbar.norm.vmin) if event.button == 1: self.cbar.norm.vmin *= scale**numpy.sign(dy) self.cbar.norm.vmax *= scale**numpy.sign(dy) elif event.button == 3: self.cbar.norm.vmin *= scale**numpy.sign(dy) self.cbar.norm.vmax /= scale**numpy.sign(dy) else: scale = 0.03 * (self.cbar.norm.vmax - self.cbar.norm.vmin) if event.button == 1: self.cbar.norm.vmin -= scale*numpy.sign(dy) self.cbar.norm.vmax -= scale*numpy.sign(dy) elif event.button == 3: self.cbar.norm.vmin -= scale*numpy.sign(dy) self.cbar.norm.vmax += scale*numpy.sign(dy) self.mappable.set_norm(self.cbar.norm) self.canvas.draw() def on_release(self, event): # force redraw on mouse release self.press = None self.mappable.set_norm(self.cbar.norm) self.canvas.draw() def get_clipped_norm(data, clipping=0.0, log=True): if hasattr(data, 'compressed'): data = data.compressed() else: data = data.flatten() if log: data = data[data > 0] if numpy.alen(data) == 0: return matplotlib.colors.LogNorm(1, 10) if clipping: chop = int(round(data.size * clipping)) clip = sorted(data)[chop:-(1+chop)] vmin, vmax = clip[0], clip[-1] else: vmin, vmax = data.min(), data.max() if log: return matplotlib.colors.LogNorm(vmin, vmax) else: return matplotlib.colors.Normalize(vmin, vmax) def plot(space, fig, ax, log=True, loglog=False, clipping=0.0, fit=None, norm=None, colorbar=True, labels=True, interpolation='nearest', **plotopts): if space.dimension == 1: data = space.get_masked() xrange = numpy.ma.array(space.axes[0][:], mask=data.mask) if fit is not None: if log: p1 = ax.semilogy(xrange, data, 'wo', **plotopts) p2 = ax.semilogy(xrange, fit, 'r', linewidth=2, **plotopts) elif loglog: p1 = ax.loglog(xrange, data, 'wo', **plotopts) p2 = ax.loglog(xrange, fit, 'r', linewidth=2, **plotopts) else: p1 = ax.plot(xrange, data, 'wo', **plotopts) p2 = ax.plot(xrange, fit, 'r', linewidth=2, **plotopts) else: if log: p1 = ax.semilogy(xrange, data, **plotopts) elif loglog: p1 = ax.loglog(xrange, data, **plotopts) else: p1 = ax.plot(xrange, data, **plotopts) p2 = [] if labels: ax.set_xlabel(space.axes[0].label) ax.set_ylabel('Intensity (a.u.)') return p1 + p2 elif space.dimension == 2: data = space.get_masked() # 2D IMSHOW PLOT xmin = space.axes[0].min xmax = space.axes[0].max ymin = space.axes[1].min ymax = space.axes[1].max if not norm: norm = get_clipped_norm(data, clipping, log) if fit is not None: im = ax.imshow(fit.transpose(), origin='lower', extent=(xmin, xmax, ymin, ymax), aspect='auto', norm=norm, interpolation=interpolation, **plotopts) else: im = ax.imshow(data.transpose(), origin='lower', extent=(xmin, xmax, ymin, ymax), aspect='auto', norm=norm, interpolation=interpolation, **plotopts) if labels: ax.set_xlabel(space.axes[0].label) ax.set_ylabel(space.axes[1].label) if colorbar: cbarwidget = fig.colorbar(im) fig._draggablecbar = DraggableColorbar(cbarwidget, im) # we need to store this instance somewhere fig._draggablecbar.connect() return im elif space.dimension == 3: if not isinstance(ax, mpl_toolkits.mplot3d.Axes3D): raise ValueError("For 3D plots, the 'ax' parameter must be an Axes3D instance (use for example gca(projection='3d') to get one)") cmap = getattr(matplotlib.cm, plotopts.pop('cmap', 'jet')) if norm is None: norm = get_clipped_norm(space.get_masked(), clipping, log) data = space.get() mask = numpy.bitwise_or(~numpy.isfinite(data), data == 0) gridx, gridy, gridz = tuple(grid[~mask] for grid in 
space.get_grid()) im = ax.scatter(gridx, gridy, gridz, c=cmap(norm(data[~mask])), marker=',' , alpha=0.7, linewidths=0) #p1 = ax.plot_surface(gridx[0,:,:], gridy[0,:,:], gridz[0,:,:], facecolors=cmap(norm(space.project(0).get_masked())), shade=False, cstride=1, rstride=1) #p2 = ax.plot_surface(gridx[:,-1,:], gridy[:,-1,:], gridz[:,-1,:], facecolors=cmap(norm(space.project(1).get_masked())), shade=False, cstride=1, rstride=1) #p3 = ax.plot_surface(gridx[:,:,0], gridy[:,:,0], gridz[:,:,0], facecolors=cmap(norm(space.project(2).get_masked())), shade=False, cstride=1, rstride=1) if labels: ax.set_xlabel(space.axes[0].label) ax.set_ylabel(space.axes[1].label) ax.set_zlabel(space.axes[2].label) if fig._draggablecbar: fig._draggablecbar.disconnect() return im elif space.dimension > 3: raise ValueError("Cannot plot 4 or higher dimensional spaces, use projections or slices to decrease dimensionality.") binoculars-0.0.4/binoculars/space.py000077500000000000000000001077121343276063200175030ustar00rootroot00000000000000from __future__ import unicode_literals import numbers import numpy import h5py import sys from itertools import chain from . import util, errors #python3 support PY3 = sys.version_info > (3,) if PY3: from functools import reduce basestring = (str,bytes) else: from itertools import izip as zip def silence_numpy_errors(): """Silence numpy warnings about zero division. Normal usage of Space() will trigger these warnings.""" numpy.seterr(divide='ignore', invalid='ignore') def sum_onto(a, axis): """Numpy convenience. Project all dimensions of an array onto an axis, i.e. apply sum() to all axes except the one given.""" for i in reversed(list(range(len(a.shape)))): if axis != i: a = a.sum(axis=i) return a class Axis(object): """Represents a single dimension finite discrete grid centered at 0. Important attributes: min lower bound max upper bound res step size / resolution label human-readable identifier min, max and res are floats, but internally only integer operations are used. 
In particular min = imin * res, max = imax * res """ def __init__(self, min, max, res, label=None): self.res = float(res) if isinstance(min, int): self.imin = min else: self.imin = int(numpy.floor(min / self.res)) if isinstance(max, int): self.imax = max else: self.imax = int(numpy.ceil(max / self.res)) self.label = label @property def max(self): return self.imax * self.res @property def min(self): return self.imin * self.res def __len__(self): return self.imax - self.imin + 1 def __iter__(self): return iter(self[index] for index in range(len(self))) def __getitem__(self, key): if isinstance(key, slice): if key.step is not None: raise IndexError('stride not supported') if key.start is None: start = 0 elif key.start < 0: raise IndexError('key out of range') elif isinstance(key.start, int): start = key.start else: raise IndexError('key start must be integer') if key.stop is None: stop = len(self) elif key.stop > len(self): raise IndexError('key out of range') elif isinstance(key.stop, int): stop = key.stop else: raise IndexError('slice stop must be integer') return self.__class__(self.imin + start, self.imin + stop - 1, self.res, self.label) elif isinstance(key, int) or isinstance(key, numpy.int64): if key >= len(self): # to support iteration raise IndexError('key out of range') return (self.imin + key) * self.res else: raise IndexError('unknown key {0!r}'.format(key)) def get_index(self, value): if isinstance(value, numbers.Number): intvalue = int(round(value / self.res)) if self.imin <= intvalue <= self.imax: return intvalue - self.imin raise ValueError('cannot get index: value {0} not in range [{1}, {2}]'.format(value, self.min, self.max)) elif isinstance(value, slice): if value.step is not None: raise IndexError('stride not supported') if value.start is None: start = None else: start = self.get_index(value.start) if value.stop is None: stop = None else: stop = self.get_index(value.stop) if start is not None and stop is not None and start > stop: start, stop = stop, start return slice(start, stop) else: intvalue = numpy.around(value / self.res).astype(int) if ((self.imin <= intvalue) & (intvalue <= self.imax)).all(): return intvalue - self.imin raise ValueError('cannot get indices, values from [{0}, {1}], axes range [{2}, {3}]'.format(value.min(), value.max(), self.min, self.max)) def __or__(self, other): # union operation if not isinstance(other, Axis): return NotImplemented if not self.is_compatible(other): raise ValueError('cannot unite axes with different resolution/label') return self.__class__(min(self.imin, other.imin), max(self.imax, other.imax), self.res, self.label) def __eq__(self, other): if not isinstance(other, Axis): return NotImplemented return self.res == other.res and self.imin == other.imin and self.imax == other.imax and self.label == other.label def __hash__(self): return hash(self.imin) ^ hash(self.imax) ^ hash(self.res) ^ hash(self.label) def is_compatible(self, other): if not isinstance(other, Axis): return False return self.res == other.res and self.label == other.label def __contains__(self, other): if isinstance(other, numbers.Number): return self.min <= other <= self.max elif isinstance(other, Axis): return self.is_compatible(other) and self.imin <= other.imin and self.imax >= other.imax def rebound(self, min, max): return self.__class__(min, max, self.res, self.label) def rebin(self, factor): # for integers the following relations hold: a // b == floor(a / b), -(-a // b) == ceil(a / b) new = self.__class__(self.imin // factor, -(-self.imax // factor), 
factor*self.res, self.label) return self.imin % factor, -self.imax % factor, new def __repr__(self): return '{0.__class__.__name__} {0.label} (min={0.min}, max={0.max}, res={0.res}, count={1})'.format(self, len(self)) def restrict(self, value): # Useful for plotting if isinstance(value, numbers.Number): if value < self.min: return self.min elif value > self.max: return self.max else: return value elif isinstance(value, slice): if value.step is not None: raise IndexError('stride not supported') if value.start is None: start = None else: start = self.restrict(value.start) if value.stop is None: stop = None if value.stop == self.max: stop = None else: stop = self.restrict(value.stop) if start is not None and stop is not None and start > stop: start, stop = stop, start return slice(start, stop) class Axes(object): """Luxurious tuple of Axis objects.""" def __init__(self, axes): self.axes = tuple(axes) if len(self.axes) > 1 and any(axis.label is None for axis in self.axes): raise ValueError('axis label is required for multidimensional space') def __iter__(self): return iter(self.axes) @property def dimension(self): return len(self.axes) @property def npoints(self): return numpy.array([len(ax) for ax in self.axes]).prod() @property def memory_size(self): # assuming double precision floats for photons, 32 bit integers for contributions return (8+4) * self.npoints @classmethod def fromfile(cls, filename): with util.open_h5py(filename, 'r') as fp: try: if 'axes' in fp and 'axes_labels' in fp: # oldest style, float min/max return cls(tuple(Axis(min, max, res, lbl) for ((min, max, res), lbl) in zip(fp['axes'], fp['axes_labels']))) elif 'axes' in fp: # new try: axes = tuple(Axis(int(imin), int(imax), res, lbl) for ((index, fmin, fmax, res, imin, imax), lbl) in zip(fp['axes'].values(), fp['axes'].keys())) return cls(tuple(axes[int(values[0])] for values in fp['axes'].values())) # reorder the axes to the way in which they were saved except ValueError: return cls(tuple(Axis(int(imin), int(imax), res, lbl) for ((imin, imax, res), lbl) in zip(fp['axes'].values(), fp['axes'].keys()))) else: # older style, integer min/max return cls(tuple(Axis(imin, imax, res, lbl) for ((imin, imax), res, lbl) in zip(fp['axes_range'], fp['axes_res'], fp['axes_labels']))) except (KeyError, TypeError) as e: raise errors.HDF5FileError('unable to load axes definition from HDF5 file {0}, is it a valid BINoculars file? 
(original error: {1!r})'.format(filename, e)) def tofile(self, filename): with util.open_h5py(filename, 'w') as fp: axes = fp.create_group('axes') for index, ax in enumerate(self.axes): axes.create_dataset(ax.label, data=[index, ax.min, ax.max, ax.res, ax.imin, ax.imax]) def toarray(self): return numpy.vstack(numpy.hstack([str(ax.imin), str(ax.imax), str(ax.res), ax.label]) for ax in self.axes) @classmethod def fromarray(cls, arr): return cls(tuple(Axis(int(imin), int(imax), float(res), lbl) for (imin, imax, res, lbl) in arr)) def index(self, obj): if isinstance(obj, Axis): return self.axes.index(obj) elif isinstance(obj, int) and 0 <= obj < len(self.axes): return obj elif isinstance(obj, basestring): label = obj.lower() matches = tuple(i for i, axis in enumerate(self.axes) if axis.label.lower() == label) if len(matches) == 0: raise ValueError('no matching axis found') elif len(matches) == 1: return matches[0] else: raise ValueError('ambiguous axis label {0}'.format(label)) else: raise ValueError('invalid axis identifier {0!r}'.format(obj)) def __contains__(self, obj): if isinstance(obj, Axis): return obj in self.axes elif isinstance(obj, int): return 0 <= obj < len(self.axes) elif isinstance(obj, basestring): label = obj.lower() return any(axis.label.lower() == label for axis in self.axes) else: raise ValueError('invalid axis identifier {0!r}'.format(obj)) def __len__(self): return len(self.axes) def __getitem__(self, key): return self.axes[key] def __eq__(self, other): if not isinstance(other, Axes): return NotImplemented return self.axes == other.axes def __ne__(self, other): if not isinstance(other, Axes): return NotImplemented return self.axes != other.axes def __repr__(self): return '{0.__class__.__name__} ({0.dimension} dimensions, {0.npoints} points, {1}) {{\n {2}\n}}'.format(self, util.format_bytes(self.memory_size), '\n '.join(repr(ax) for ax in self.axes)) def restricted_key(self, key): if len(key) == 0: return None if len(key) == len(self.axes): return tuple(ax.restrict(s) for s, ax in zip(key, self.axes)) else: raise IndexError('dimension mismatch') class EmptySpace(object): """Convenience object for sum() and friends. Treated as zero for addition. Does not share a base class with Space for simplicity.""" def __init__(self, config=None, metadata=None): self.config = config self.metadata = metadata def __add__(self, other): if not isinstance(other, Space) and not isinstance(other, EmptySpace): return NotImplemented return other def __radd__(self, other): if not isinstance(other, Space) and not isinstance(other, EmptySpace): return NotImplemented return other def __iadd__(self, other): if not isinstance(other, Space) and not isinstance(other, EmptySpace): return NotImplemented return other def tofile(self, filename): """Store EmptySpace in HDF5 file.""" with util.atomic_write(filename) as tmpname: with util.open_h5py(tmpname, 'w') as fp: fp.attrs['type'] = 'Empty' def __repr__(self): return '{0.__class__.__name__}'.format(self) class Space(object): """Main data-storing object in BINoculars. Data is represented on an n-dimensional rectangular grid. Per grid point, the number of photons (~ intensity) and the number of original data points (pixels) contribution is stored. 
Important attributes: axes Axes instances describing range and stepsizes of each of the dimensions photons n-dimension numpy float array, total intensity per grid point contribitions n-dimensional numpy integer array, number of original datapoints (pixels) per grid point dimension n""" def __init__(self, axes, config=None, metadata=None): if not isinstance(axes, Axes): self.axes = Axes(axes) else: self.axes = axes self.config = config self.metadata = metadata self.photons = numpy.zeros([len(ax) for ax in self.axes], order='C') self.contributions = numpy.zeros(self.photons.shape, order='C') @property def dimension(self): return self.axes.dimension @property def npoints(self): return self.photons.size @property def memory_size(self): """Returns approximate memory consumption of this Space. Only considers size of .photons and .contributions, does not take into account the overhead.""" return self.photons.nbytes + self.contributions.nbytes @property def config(self): """util.ConfigFile instance describing configuration file used to create this Space instance""" return self._config @config.setter def config(self, conf): if isinstance(conf, util.ConfigFile): self._config = conf elif not conf: self._config = util.ConfigFile() else: raise TypeError("'{0!r}' is not a util.ConfigFile".format(space)) @property def metadata(self): """util.MetaData instance describing metadata used to create this Space instance""" return self._metadata @metadata.setter def metadata(self, metadata): if isinstance(metadata, util.MetaData): self._metadata = metadata elif not metadata: self._metadata = util.MetaData() else: raise TypeError("'{0!r}' is not a util.MetaData".format(space)) def copy(self): """Returns a copy of self. Numpy data is not shared, but the Axes object is.""" new = self.__class__(self.axes, self.config, self.metadata) new.photons[:] = self.photons new.contributions[:] = self.contributions return new def get(self): """Returns normalized photon count.""" return self.photons/self.contributions def __repr__(self): return '{0.__class__.__name__} ({0.dimension} dimensions, {0.npoints} points, {1}) {{\n {2}\n}}'.format(self, util.format_bytes(self.memory_size), '\n '.join(repr(ax) for ax in self.axes)) def __getitem__(self, key): """Slicing only! space[-0.2:0.2, 0.9:1.1] does exactly what the syntax implies. Ellipsis operator '...' is not supported.""" newkey = self.get_key(key) newaxes = tuple(ax[k] for k, ax in zip(newkey, self.axes) if isinstance(ax[k], Axis)) if not newaxes: return self.photons[newkey] / self.contributions[newkey] newspace = self.__class__(newaxes, self.config, self.metadata) newspace.photons = self.photons[newkey].copy() newspace.contributions = self.contributions[newkey].copy() return newspace def get_key(self, key): # needed in the fitaid for visualising the interpolated data """Convert the n-dimensional interval described by key (as used by e.g. __getitem__()) from data coordinates to indices.""" if isinstance(key, numbers.Number) or isinstance(key, slice): if not len(self.axes) == 1: raise IndexError('dimension mismatch') else: key = [key] elif not (isinstance(key, tuple) or isinstance(key, list)) or not len(key) == len(self.axes): raise IndexError('dimension mismatch') return tuple(ax.get_index(k) for k, ax in zip(key, self.axes)) def project(self, axis, *more_axes): """Reduce dimensionality of Space by projecting onto 'axis'. All data (photons, contributions) is summed along this axis. 
axis the label of the axis or the index *more_axis also project on these axes""" index = self.axes.index(axis) newaxes = list(self.axes) newaxes.pop(index) newspace = self.__class__(newaxes, self.config, self.metadata) newspace.photons = self.photons.sum(axis=index) newspace.contributions = self.contributions.sum(axis=index) if more_axes: return newspace.project(more_axes[0], *more_axes[1:]) else: return newspace def slice(self, axis, key): """Single-axis slice. axis label or index of axis to slice key something like slice(lower_data_range, upper_data_range)""" axindex = self.axes.index(axis) newkey = list(slice(None) for ax in self.axes) newkey[axindex] = key return self.__getitem__(tuple(newkey)) def get_masked(self): """Returns photons/contributions, but with divide-by-zero's masked out.""" return numpy.ma.array(data=self.get(), mask=(self.contributions == 0)) def get_variance(self): return numpy.ma.array(data=1 / self.contributions, mask=(self.contributions == 0)) def get_grid(self): """Returns the data coordinates of each grid point, as n-tuple of n-dimensinonal arrays. Basically numpy.mgrid() in data coordinates.""" igrid = numpy.mgrid[tuple(slice(0, len(ax)) for ax in self.axes)] grid = tuple(numpy.array((grid + ax.imin) * ax.res) for grid, ax in zip(igrid, self.axes)) return grid def max(self, axis=None): """Returns maximum intensity.""" return self.get_masked().max(axis=axis) def argmax(self): """Returns data coordinates of grid point with maximum intensity.""" array = self.get_masked() return tuple(ax[key] for ax, key in zip(self.axes, numpy.unravel_index(numpy.argmax(array), array.shape))) def __add__(self, other): if isinstance(other, numbers.Number): new = self.copy() new.photons += other * self.contributions return new if not isinstance(other, Space): return NotImplemented if not len(self.axes) == len(other.axes) or not all(a.is_compatible(b) for (a, b) in zip(self.axes, other.axes)): raise ValueError('cannot add spaces with different dimensionality or resolution') new = self.__class__([a | b for (a, b) in zip(self.axes, other.axes)]) new += self new += other return new def __iadd__(self, other): if isinstance(other, numbers.Number): self.photons += other * self.contributions return self if not isinstance(other, Space): return NotImplemented if not len(self.axes) == len(other.axes) or not all(a.is_compatible(b) for (a, b) in zip(self.axes, other.axes)): raise ValueError('cannot add spaces with different dimensionality or resolution') if not all(other_ax in self_ax for (self_ax, other_ax) in zip(self.axes, other.axes)): return self.__add__(other) index = tuple(slice(self_ax.get_index(other_ax.min), self_ax.get_index(other_ax.min) + len(other_ax)) for (self_ax, other_ax) in zip(self.axes, other.axes)) self.photons[index] += other.photons self.contributions[index] += other.contributions self.metadata += other.metadata return self def __sub__(self, other): return self.__add__(other * -1) def __isub__(self, other): return self.__iadd__(other * -1) def __mul__(self, other): if isinstance(other, numbers.Number): new = self.__class__(self.axes, self.config, self.metadata) # we would like to keep 1/contributions as the variance # var(aX) = a**2var(X) new.photons = self.photons / other new.contributions = self.contributions / other**2 return new else: return NotImplemented def trim(self): """Reduce total size of Space by trimming zero-contribution data points on the boundaries.""" mask = self.contributions > 0 lims = (numpy.flatnonzero(sum_onto(mask, i)) for (i, ax) in 
enumerate(self.axes)) lims = tuple((i.min(), i.max()) for i in lims) self.axes = Axes(ax.rebound(min + ax.imin, max + ax.imin) for (ax, (min, max)) in zip(self.axes, lims)) slices = tuple(slice(min, max+1) for (min, max) in lims) self.photons = self.photons[slices].copy() self.contributions = self.contributions[slices].copy() def rebin(self, resolutions): """Change bin size. resolution n-tuple of floats, new resolution of each axis""" if not len(resolutions) == len(self.axes): raise ValueError('cannot rebin space with different dimensionality') if resolutions == tuple(ax.res for ax in self.axes): return self # gather data and transform coords = self.get_grid() intensity = self.get() weights = self.contributions return self.from_image(resolutions, labels, coords, intensity, weights) def reorder(self, labels): """Change order of axes.""" if not self.dimension == len(labels): raise ValueError('dimension mismatch') newindices = list(self.axes.index(label) for label in labels) new = self.__class__(tuple(self.axes[index] for index in newindices), self.config, self.metadata) new.photons = numpy.transpose(self.photons, axes=newindices) new.contributions = numpy.transpose(self.contributions, axes=newindices) return new def transform_coordinates(self, resolutions, labels, transformation): # gather data and transform coords = self.get_grid() transcoords = transformation(*coords) intensity = self.get() weights = self.contributions # get rid of invalid coords valid = reduce(numpy.bitwise_and, chain((numpy.isfinite(t) for t in transcoords)), (weights > 0)) transcoords = tuple(t[valid] for t in transcoords) return self.from_image(resolutions, labels, transcoords, intensity[valid], weights[valid]) def process_image(self, coordinates, intensity, weights): """Load image data into Space. coordinates n-tuple of data coordinate arrays intensity data intensity array weights weights array, supply numpy.ones_like(intensity) for equal weights""" if len(coordinates) != len(self.axes): raise ValueError('dimension mismatch between coordinates and axes') intensity = numpy.nan_to_num(intensity).flatten() # invalids can be handeled by setting weight to 0, this ensures the weights can do that weights = weights.flatten() indices = numpy.array(tuple(ax.get_index(coord) for (ax, coord) in zip(self.axes, coordinates))) for i in range(0, len(self.axes)): for j in range(i+1, len(self.axes)): indices[i, :] *= len(self.axes[j]) indices = indices.sum(axis=0).astype(int).flatten() photons = numpy.bincount(indices, weights=intensity * weights) contributions = numpy.bincount(indices, weights=weights) self.photons.ravel()[:photons.size] += photons self.contributions.ravel()[:contributions.size] += contributions @classmethod def from_image(cls, resolutions, labels, coordinates, intensity, weights, limits=None): """Create Space from image data. 
resolutions n-tuple of axis resolutions labels n-tuple of axis labels coordinates n-tuple of data coordinate arrays intensity data intensity array""" if limits is not None: invalid = numpy.zeros(intensity.shape).astype(numpy.bool) for coord, sl in zip(coordinates, limits): if sl.start is None and sl.stop is not None: invalid += coord > sl.stop elif sl.start is not None and sl.stop is None: invalid += coord < sl.start elif sl.start is not None and sl.stop is not None: invalid += numpy.bitwise_or(coord < sl.start, coord > sl.stop) if numpy.all(invalid == True): return EmptySpace() coordinates = tuple(coord[~invalid] for coord in coordinates) intensity = intensity[~invalid] weights = weights[~invalid] axes = tuple(Axis(coord.min(), coord.max(), res, label) for res, label, coord in zip(resolutions, labels, coordinates)) newspace = cls(axes) newspace.process_image(coordinates, intensity, weights) return newspace def tofile(self, filename): """Store Space in HDF5 file.""" with util.atomic_write(filename) as tmpname: with util.open_h5py(tmpname, 'w') as fp: fp.attrs['type'] = 'Space' self.config.tofile(fp) self.axes.tofile(fp) self.metadata.tofile(fp) fp.create_dataset('counts', self.photons.shape, dtype=self.photons.dtype, compression='gzip').write_direct(self.photons) fp.create_dataset('contributions', self.contributions.shape, dtype=self.contributions.dtype, compression='gzip').write_direct(self.contributions) @classmethod def fromfile(cls, file, key=None): """Load Space from HDF5 file. file filename string or h5py.Group instance key sliced (subset) loading, should be an n-tuple of slice()s in data coordinates""" try: with util.open_h5py(file, 'r') as fp: if 'type' in fp.attrs.keys(): if fp.attrs['type'] == 'Empty': return EmptySpace() axes = Axes.fromfile(fp) config = util.ConfigFile.fromfile(fp) metadata = util.MetaData.fromfile(fp) if key: if len(axes) != len(key): raise ValueError("dimensionality of 'key' does not match dimensionality of Space in HDF5 file {0}".format(file)) key = tuple(ax.get_index(k) for k, ax in zip(key, axes)) for index, sl in enumerate(key): if sl.start == sl.stop and sl.start is not None: raise KeyError('key results in empty space') axes = tuple(ax[k] for k, ax in zip(key, axes) if isinstance(k, slice)) else: key = Ellipsis space = cls(axes, config, metadata) try: fp['counts'].read_direct(space.photons, key) fp['contributions'].read_direct(space.contributions, key) except (KeyError, TypeError) as e: raise errors.HDF5FileError('unable to load Space from HDF5 file {0}, is it a valid BINoculars file? (original error: {1!r})'.format(file, e)) except IOError as e: raise errors.HDF5FileError("unable to open '{0}' as HDF5 file (original error: {1!r})".format(file, e)) return space class Multiverse(object): """A collection of spaces with basic support for addition. Only to be used when processing data. 
This makes it possible to process multiple limit sets in a combination of scans""" def __init__(self, spaces): self.spaces = list(spaces) @property def dimension(self): return len(self.spaces) def __add__(self, other): if not isinstance(other, Multiverse): return NotImplemented if not self.dimension == other.dimension: raise ValueError('cannot add multiverses with different dimensionality') return self.__class__(tuple(s + o for s, o in zip(self.spaces, other.spaces))) def __iadd__(self, other): if not isinstance(other, Multiverse): return NotImplemented if not self.dimension == other.dimension: raise ValueError('cannot add multiverses with different dimensionality') for index, o in enumerate(other.spaces): self.spaces[index] += o return self def tofile(self, filename): with util.atomic_write(filename) as tmpname: with util.open_h5py(tmpname, 'w') as fp: fp.attrs['type'] = 'Multiverse' for index, sp in enumerate(self.spaces): spacegroup = fp.create_group('space_{0}'.format(index)) sp.tofile(spacegroup) @classmethod def fromfile(cls, file): """Load Multiverse from HDF5 file.""" try: with util.open_h5py(file, 'r') as fp: if 'type' in fp.attrs: if fp.attrs['type'] == 'Multiverse': return cls(tuple(Space.fromfile(fp[label]) for label in fp)) else: raise TypeError('This is not a multiverse') else: raise TypeError('This is not a multiverse') except IOError as e: raise errors.HDF5FileError("unable to open '{0}' as HDF5 file (original error: {1!r})".format(file, e)) def __repr__(self): return '{0.__class__.__name__}\n{1}'.format(self, self.spaces) class EmptyVerse(object): """Convenience object for sum() and friends. Treated as zero for addition.""" def __add__(self, other): if not isinstance(other, Multiverse): return NotImplemented return other def __radd__(self, other): if not isinstance(other, Multiverse): return NotImplemented return other def __iadd__(self, other): if not isinstance(other, Multiverse): return NotImplemented return other def union_axes(axes): axes = tuple(axes) if len(axes) == 1: return axes[0] if not all(isinstance(ax, Axis) for ax in axes): raise TypeError('not all objects are Axis instances') if len(set(ax.res for ax in axes)) != 1 or len(set(ax.label for ax in axes)) != 1: raise ValueError('cannot unite axes with different resolution/label') mi = min(ax.min for ax in axes) ma = max(ax.max for ax in axes) first = axes[0] return first.__class__(mi, ma, first.res, first.label) def union_unequal_axes(axes): axes = tuple(axes) if len(axes) == 1: return axes[0] if not all(isinstance(ax, Axis) for ax in axes): raise TypeError('not all objects are Axis instances') if len(set(ax.label for ax in axes)) != 1: raise ValueError('cannot unite axes with different label') mi = min(ax.min for ax in axes) ma = max(ax.max for ax in axes) res = min(ax.res for ax in axes) # making it easier to use the sliderwidget otherwise this hase no meaning first = axes[0] return first.__class__(mi, ma, res, first.label) def sum(spaces): """Calculate sum of iterable of Space instances.""" spaces = tuple(space for space in spaces if not isinstance(space, EmptySpace)) if len(spaces) == 0: return EmptySpace() if len(spaces) == 1: return spaces[0] if len(set(space.dimension for space in spaces)) != 1: raise TypeError('dimension mismatch in spaces') first = spaces[0] axes = tuple(union_axes(space.axes[i] for space in spaces) for i in range(first.dimension)) newspace = first.__class__(axes) for space in spaces: newspace += space return newspace def verse_sum(verses): i = iter(M.spaces for M in verses) return 
Multiverse(sum(spaces) for spaces in zip(*i)) # hybrid sum() / __iadd__() def chunked_sum(verses, chunksize=10): """Calculate sum of iterable of Multiverse instances. Creates intermediate sums to avoid growing a large space at every summation. verses iterable of Multiverse instances chunksize number of Multiverse instances in each intermediate sum""" result = EmptyVerse() for chunk in util.grouper(verses, chunksize): result += verse_sum(M for M in chunk) return result def iterate_over_axis(space, axis, resolution=None): ax = space.axes[space.axes.index(axis)] if resolution: bins = get_bins(ax, resolution) for start, stop in zip(bins[:-1], bins[1:]): yield space.slice(axis, slice(start, stop)) else: for value in ax: yield space.slice(axis, value) def get_axis_values(axes, axis, resolution=None): ax = axes[axes.index(axis)] if resolution: bins = get_bins(ax, resolution) return (bins[:-1] + bins[1:]) / 2 else: return numpy.array(list(ax)) def iterate_over_axis_keys(axes, axis, resolution=None): axindex = axes.index(axis) ax = axes[axindex] k = [slice(None) for i in axes] if resolution: bins = get_bins(ax, resolution) for start, stop in zip(bins[:-1], bins[1:]): k[axindex] = slice(start, stop) yield k else: for value in ax: k[axindex] = value yield k def get_bins(ax, resolution): if float(resolution) < ax.res: raise ValueError('interval {0} to low, minimum interval is {1}'.format(resolution, ax.res)) mi, ma = ax.min, ax.max return numpy.linspace(mi, ma, numpy.ceil(1 / numpy.float(resolution) * (ma - mi))) def dstack(spaces, dindices, dlabel, dresolution): def transform(space, dindex): resolutions = list(ax.res for ax in space.axes) resolutions.append(dresolution) labels = list(ax.label for ax in space.axes) labels.append(dlabel) exprs = list(ax.label for ax in space.axes) exprs.append('ones_like({0}) * {1}'.format(labels[0], dindex)) transformation = util.transformation_from_expressions(space, exprs) return space.transform_coordinates(resolutions, labels, transformation) return sum(transform(space, dindex) for space, dindex in zip(spaces, dindices)) def axis_offset(space, label, offset): exprs = list(ax.label for ax in space.axes) index = space.axes.index(label) exprs[index] += '+ {0}'.format(offset) transformation = util.transformation_from_expressions(space, exprs) return space.transform_coordinates((ax.res for ax in space.axes), (ax.label for ax in space.axes), transformation) def bkgsubtract(space, bkg): if space.dimension == bkg.dimension: bkg.photons = bkg.photons * space.contributions / bkg.contributions bkg.photons[bkg.contributions == 0] = 0 bkg.contributions = space.contributions return space - bkg else: photons = numpy.broadcast_arrays(space.photons, bkg.photons)[1] contributions = numpy.broadcast_arrays(space.contributions, bkg.contributions)[1] bkg = Space(space.axes) bkg.photons = photons bkg.contributions = contributions return bkgsubtract(space, bkg) def make_compatible(spaces): if not numpy.alen(numpy.unique(len(space.axes) for space in spaces)) == 1: raise ValueError('cannot make spaces with different dimensionality compatible') ax0 = tuple(ax.label for ax in spaces[0].axes) resmax = tuple(numpy.vstack(tuple(ax.res for ax in space.reorder(ax0).axes) for space in spaces).max(axis=0)) resmin = tuple(numpy.vstack(tuple(ax.res for ax in space.reorder(ax0).axes) for space in spaces).min(axis=0)) if not resmax == resmin: print('Warning: Not all spaces have the same resolution. 
Resolution will be changed to: {0}'.format(resmax)) return tuple(space.reorder(ax0).rebin2(resmax) for space in spaces) binoculars-0.0.4/binoculars/util.py000077500000000000000000000774061343276063200173730ustar00rootroot00000000000000from __future__ import print_function, division import os import sys import gzip import itertools import random import inspect import time import copy import numpy import contextlib import argparse import h5py import glob from . import errors import struct import json import socket import binascii import re ### ARGUMENT HANDLING #python3 support PY3 = sys.version_info > (3,) if PY3: import pickle import io import configparser else: import StringIO as io import Queue as queue import cPickle as pickle import ConfigParser as configparser def as_string(text): if hasattr(text, "decode"): text = text.decode() return text class OrderedOperation(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): oops = getattr(namespace, 'ordered_operations', []) oops.append((self.dest, values)) setattr(namespace, 'ordered_operations', oops) def argparse_common_arguments(parser, *args): for arg in args: # (ORDERED) OPERATIONS if arg == 'project': parser.add_argument('-p', '--project', metavar='AXIS', action=OrderedOperation, help='project space on AXIS') elif arg == 'slice': parser.add_argument('--slice', nargs=2, metavar=('AXIS', 'START:STOP'), action=OrderedOperation, help="slice AXIS from START to STOP (replace minus signs by 'm')") elif arg == 'pslice': parser.add_argument('--pslice', nargs=2, metavar=('AXIS', 'START:STOP'), action=OrderedOperation, help="like slice, but also project on AXIS after slicing") elif arg == 'transform': parser.add_argument('--transform', metavar='VAR@RES=EXPR;VAR2@RES2=EXPR2;...', action=OrderedOperation, help='perform coordinate transformation, rebinning data on new axis named VAR with resolution RES defined by EXPR, example: Q@0.1=sqrt(H**2+K**2+L**2)') elif arg == 'rebin': parser.add_argument('--rebin', metavar='N,M,...', action=OrderedOperation, help='reduce binsize by factor N in first dimension, M in second, etc') # SUBTRACT elif arg == 'subtract': parser.add_argument('--subtract', metavar='SPACE', help='subtract SPACE from input file') # PRESENTATION elif arg == 'nolog': parser.add_argument('--nolog', action='store_true', help='do not use logarithmic axis') elif arg == 'clip': parser.add_argument('-c', '--clip', metavar='FRACTION', default=0.00, help='clip color scale to remove FRACTION datapoints') # OUTPUT elif arg == 'savepdf': parser.add_argument('-s', '--savepdf', action='store_true', help='save output as pdf, automatic file naming') elif arg == 'savefile': parser.add_argument('--savefile', metavar='FILENAME', help='save output as FILENAME, autodetect filetype') # ERROR! 
else: raise ValueError("unsupported argument '{0}'".format(arg)) def parse_transform_args(transform): for t in transform.split(';'): lhs, expr = t.split('=') ax, res = lhs.split('@') yield ax.strip(), float(res), expr.strip() def handle_ordered_operations(space, args, auto3to2=False): info = [] for command, opts in getattr(args, 'ordered_operations', []): if command == 'slice' or command == 'pslice': ax, key = opts axindex = space.axes.index(ax) axlabel = space.axes[axindex].label if ':' in key: start, stop = key.split(':') if start: start = float(start.replace('m', '-')) else: start = space.axes[axindex].min if stop: stop = float(stop.replace('m', '-')) else: stop = space.axes[axindex].max key = slice(start, stop) info.append('sliced in {0} from {1} to {2}'.format(axlabel, start, stop)) else: key = float(key.replace('m', '-')) info.append('sliced in {0} at {1}'.format(axlabel, key)) space = space.slice(axindex, key) if command == 'pslice': try: projectaxis = space.axes.index(ax) except ValueError: pass else: info.append('projected on {0}'.format(space.axes[projectaxis].label)) space = space.project(projectaxis) elif command == 'project': projectaxis = space.axes.index(opts) info.append('projected on {0}'.format(space.axes[projectaxis].label)) space = space.project(projectaxis) elif command == 'transform': labels, resolutions, exprs = list(zip(*parse_transform_args(opts))) transformation = transformation_from_expressions(space, exprs) info.append('transformed to {0}'.format(', '.join('{0} = {1}'.format(label, expr) for (label, expr) in zip(labels, exprs)))) space = space.transform_coordinates(resolutions, labels, transformation) elif command == 'rebin': if ',' in opts: factors = tuple(int(i) for i in opts.split(',')) else: factors = (int(opts),) * space.dimension space = space.rebin(factors) else: raise ValueError("unsported Ordered Operation '{0}'".format(command)) if auto3to2 and space.dimension == 3: # automatic projection on smallest axis projectaxis = numpy.argmin(space.photons.shape) info.append('projected on {0}'.format(space.axes[projectaxis].label)) space = space.project(projectaxis) return space, info ### STATUS LINES _status_line_length = 0 def status(line, eol=False): """Prints a status line to sys.stdout, overwriting the previous one. Set eol to True to append a newline to the end of the line""" global _status_line_length sys.stdout.write('\r{0}\r{1}'.format(' '*_status_line_length, line)) if eol: sys.stdout.write('\n') _status_line_length = 0 else: _status_line_length = len(line) sys.stdout.flush() def statusnl(line): """Shortcut for status(..., eol=True)""" return status(line, eol=True) def statuseol(): """Starts a new status line, keeping the previous one intact""" global _status_line_length _status_line_length = 0 sys.stdout.write('\n') sys.stdout.flush() def statuscl(): """Clears the status line, shortcut for status('')""" return status('') ### Dispatcher, projection and input finder def get_backends(): modules = glob.glob(os.path.join(os.path.dirname(__file__), 'backends', '*.py')) names = list() for module in modules: if not module.endswith('__init__.py'): names.append(os.path.splitext(os.path.basename(module))[0]) return names def get_projections(module): from . import backend return get_base(module, backend.ProjectionBase) def get_inputs(module): from . import backend return get_base(module, backend.InputBase) def get_dispatchers(): from . 
import dispatcher from inspect import isclass items = dir(dispatcher) options = [] for item in items: obj = getattr(dispatcher, item) if isclass(obj): if issubclass(obj, dispatcher.DispatcherBase): options.append(item) return options def get_base(modname, base): from inspect import isclass if modname not in get_backends(): raise KeyError("{0} is not an available backend".format(modname)) try: backends = __import__('backends.{0}'.format(modname), globals(), locals(), [], 1) except ImportError as e: raise ImportError("Unable to import module backends.{0}: {1}".format(modname, e)) backend = getattr(backends, modname) items = dir(backend) options = [] for item in items: obj = getattr(backend, item) if isclass(obj): if issubclass(obj, base): options.append(item) return options ### Dispatcher, projection and input configuration options finder def get_dispatcher_configkeys(classname): from . import dispatcher cls = getattr(dispatcher, classname) return get_configkeys(cls) def get_projection_configkeys(modname, classname): return get_backend_configkeys(modname, classname) def get_input_configkeys(modname, classname): return get_backend_configkeys(modname, classname) def get_backend_configkeys(modname, classname): backends = __import__('backends.{0}'.format(modname), globals(), locals(), [], 1) backend = getattr(backends, modname) cls = getattr(backend, classname) return get_configkeys(cls) def get_configkeys(cls): from inspect import getsource items = list() while hasattr(cls, 'parse_config'): code = getsource(cls.parse_config) for line in code.split('\n'): key = parse_configcode(line) if key: if key not in items: items.append(key) cls = cls.__base__ return items def parse_configcode(line): try: comment = '#'.join(line.split('#')[1:]) line = line.split('#')[0] index = line.index('config.pop') item = line[index:].split('\'')[1] if item == 'action': return # action is reserved for internal use! return item, comment except ValueError: pass ### CONFIGURATION MANAGEMENT def parse_range(r): if '-' in r: a, b = r.split('-') return list(range(int(a), int(b)+1)) elif r: return [int(r)] else: return [] def parse_multi_range(s): if not s: return s out = [] ranges = s.split(',') for r in ranges: out.extend(parse_range(r)) return out def parse_tuple(s, length=None, type=str): if not s: return s t = tuple(type(i) for i in s.split(',')) if length is not None and len(t) != length: raise ValueError('invalid tuple length: expected {0} got {1}'.format(length, len(t))) return t def parse_bool(s): l = s.lower() if l in ('1', 'true', 'yes', 'on'): return True elif l in ('0', 'false', 'no', 'off'): return False raise ValueError("invalid input for boolean: '{0}'".format(s)) def parse_pairs(s): if not s: return s limits = [] for lim in re.findall('\[(.*?)\]', s): parsed = [] for pair in re.split(',', lim): mi, ma = tuple(m.strip() for m in pair.split(':')) if mi == '' and ma == '': parsed.append(slice(None)) elif mi == '': parsed.append(slice(None, float(ma))) elif ma == '': parsed.append(slice(float(mi), None)) else: if float(ma) < float(mi): raise ValueError("invalid input. 
maximum is larger than minimum: '{0}'".format(s)) else: parsed.append(slice(float(mi), float(ma))) limits.append(parsed) return limits def limit_to_filelabel(s): return tuple('[{0}]'.format(lim.replace('-', 'm').replace(':', '-').replace(' ', '')) for lim in re.findall('\[(.*?)\]', s)) class MetaBase(object): def __init__(self, label=None, section=None): self.sections = [] if label is not None and section is not None: self.sections.append(label) setattr(self, label, section) elif label is not None: self.sections.append(label) setattr(self, label, dict()) def add_section(self, label, section=None): self.sections.append(label) if section is not None: setattr(self, label, section) else: setattr(self, label, dict()) def __repr__(self): str = '{0.__class__.__name__}{{\n'.format(self) for section in self.sections: str += ' [{}]\n'.format(section) s = getattr(self, section) for entry in s: str += ' {} = {}\n'.format(entry, s[entry]) str += '}\n' return str def copy(self): return copy.deepcopy(self) def serialize(self): sections = {} for section in self.sections: section_dict = {} attr = getattr(self, section) for key in list(attr.keys()): if isinstance(attr[key], numpy.ndarray): # to be able to include numpy arrays in the serialisation sio = io.StringIO() numpy.save(sio, attr[key]) sio.seek(0) section_dict[key] = binascii.b2a_hex(sio.read()) # hex codation is needed to let json work with the string else: section_dict[key] = attr[key] sections[section] = section_dict return json.dumps(sections) @classmethod def fromserial(cls, s): obj = cls() data = json.loads(s) for section in list(data.keys()): section_dict = data[section] for key in list(section_dict.keys()): if isinstance(section_dict[key], str): # find and replace all the numpy serialised objects if section_dict[key].startswith('934e554d505901004600'): # numpy marker sio = io.StringIO() sio.write(binascii.a2b_hex(section_dict[key])) sio.seek(0) section_dict[key] = numpy.load(sio) setattr(obj, section, data[section]) if section not in obj.sections: obj.sections.append(section) return obj class MetaData(object): # a collection of metadata objects def __init__(self): self.metas = [] def add_dataset(self, dataset): if not isinstance(dataset, MetaBase) and not isinstance(dataset, ConfigFile): raise ValueError('MetaBase instance expected') else: self.metas.append(dataset) def __add__(self, other): new = self.__class__() new += self new += other return new def __iadd__(self, other): self.metas.extend(other.metas) return self @classmethod def fromfile(cls, filename): if isinstance(filename, str): if not os.path.exists(filename): raise IOError('Error importing configuration file. 
filename {0} does not exist'.format(filename)) metadataobj = cls() with open_h5py(filename, 'r') as fp: try: metadata = fp['metadata'] except KeyError as e: metadata = [] # when metadata is not present, proceed without Error for label in metadata: meta = MetaBase() for section in list(metadata[label].keys()): group = metadata[label][section] setattr(meta, section, dict((key, group[key].value) for key in group)) meta.sections.append(section) metadataobj.metas.append(meta) return metadataobj def tofile(self, filename): with open_h5py(filename, 'w') as fp: metadata = fp.create_group('metadata') for meta in self.metas: label = find_unused_label('metasection', list(metadata.keys())) metabase = metadata.create_group(label) for section in meta.sections: sectiongroup = metabase.create_group(section) s = getattr(meta, section) for key in list(s.keys()): sectiongroup.create_dataset(key, data=s[key]) def __repr__(self): str = '{0.__class__.__name__}{{\n'.format(self) for meta in self.metas: for line in meta.__repr__().split('\n'): str += ' ' + line + '\n' str += '}\n' return str def serialize(self): return json.dumps(list(meta.serialize() for meta in self.metas)) @classmethod def fromserial(cls, s): obj = cls() for item in json.loads(s): obj.metas.append(MetaBase.fromserial(item)) return obj #Contains the unparsed config dicts class ConfigFile(MetaBase): def __init__(self, origin='n/a', command=[]): self.origin = origin self.command = command super(ConfigFile, self).__init__() self.sections = ['dispatcher', 'projection', 'input'] for section in self.sections: setattr(self, section, dict()) @classmethod def fromfile(cls, filename): if isinstance(filename, str): if not os.path.exists(filename): raise IOError('Error importing configuration file. filename {0} does not exist'.format(filename)) configobj = cls(str(filename)) with open_h5py(filename, 'r') as fp: try: config = fp['configuration'] if 'command' in config.attrs: configobj.command = json.loads(as_string(config.attrs['command'])) for section in config: if isinstance(config[section], h5py._hl.group.Group): # new setattr(configobj, section, dict((key, config[section][key].value) for key in config[section])) else: # old setattr(configobj, section, dict(config[section])) except KeyError as e: pass # when config is not present, proceed without Error return configobj @classmethod def fromtxtfile(cls, filename, command=[], overrides=[]): if not os.path.exists(filename): raise IOError('Error importing configuration file. 
filename {0} does not exist'.format(filename)) config = configparser.RawConfigParser() config.read(filename) for section, option, value in overrides: config.set(section, option, value) configobj = cls(filename, command=command) for section in configobj.sections: setattr(configobj, section, dict((k, v.split('#')[0].strip()) for (k, v) in config.items(section))) return configobj def tofile(self, filename): with open_h5py(filename, 'w') as fp: conf = fp.create_group('configuration') conf.attrs['origin'] = str(self.origin) conf.attrs['command'] = json.dumps(self.command) for section in self.sections: sectiongroup = conf.create_group(section) s = getattr(self, section) for key in list(s.keys()): sectiongroup.create_dataset(key, data=s[key]) def totxtfile(self, filename): with open(filename, 'w') as fp: fp.write('# Configurations origin: {}\n'.format(self.origin)) for section in self.sections: fp.write('[{}]\n'.format(section)) s = getattr(self, section) for entry in s: fp.write('{} = {}\n'.format(entry, s[entry])) def __repr__(self): str = super(ConfigFile, self).__repr__() str += 'origin = {0}\n'.format(self.origin) str += 'command = {0}'.format(','.join(self.command)) return str #contains one parsed dict, for distribution to dispatcher, input or projection class class ConfigSection(object): def __init__(self, **kwargs): self.__dict__.update(kwargs) def copy(self): return copy.deepcopy(self) #contains the parsed configsections class ConfigSectionGroup(object): def __init__(self, origin='n/a'): self.origin = origin self.sections = 'dispatcher', 'projection', 'input' for section in self.sections: setattr(self, section, ConfigSection()) self.configfile = ConfigFile() class ConfigurableObject(object): def __init__(self, config): if isinstance(config, ConfigSection): self.config = config elif not isinstance(config, dict): raise ValueError('expecting dict or Configsection, not: {0}'.format(type(config))) else: self.config = ConfigSection() try: allkeys = list(config.keys()) self.parse_config(config) except KeyError as exc: raise errors.ConfigError("Configuration option {0} is missing from the configuration file. Please specify this option in the configuration file".format(exc)) except Exception as exc: missing = set(key for key in allkeys if key not in list(self.config.__dict__.keys())) - set(config.keys()) exc.args = errors.addmessage(exc.args, ". Unable to parse configuration option '{0}'. 
The error can quite likely be solved by modifying the option in the configuration file.".format(','.join(missing))) raise for k in config: print('warning: unrecognized configuration option {0} for {1}'.format(k, self.__class__.__name__)) self.config.class_ = self.__class__ def parse_config(self, config): # every known option should be pop()'ed from config, converted to a # proper type and stored as property in self.config, for example: # self.config.foo = int(config.pop('foo', 1)) pass ### FILES def best_effort_atomic_rename(src, dest): if sys.platform == 'win32' and os.path.exists(dest): os.remove(dest) os.rename(src, dest) def filename_enumerator(filename, start=0): base, ext = os.path.splitext(filename) for count in itertools.count(start): yield '{0}_{2}{1}'.format(base, ext, count) def find_unused_filename(filename): if not os.path.exists(filename): return filename for f in filename_enumerator(filename, 2): if not os.path.exists(f): return f def label_enumerator(label, start=0): for count in itertools.count(start): yield '{0}_{1}'.format(label, count) def find_unused_label(label, labellist): for l in label_enumerator(label): if not l in labellist: return l def yield_when_exists(filelist, timeout=None): """Wait for files in 'filelist' to appear, for a maximum of 'timeout' seconds, yielding them in arbitrary order as soon as they appear. If 'filelist' is a set, it will be modified in place, and on timeout it will contain the files that have not appeared yet.""" if not isinstance(filelist, set): filelist = set(filelist) delay = loop_delayer(5) start = time.time() while filelist: next(delay) exists = set(f for f in filelist if os.path.exists(f)) for e in exists: yield e filelist -= exists if timeout is not None and time.time() - start > timeout: break def wait_for_files(filelist, timeout=None): """Wait until the files in 'filelist' have appeared, for a maximum of 'timeout' seconds. 
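For example, wait_for_files(['run_1.hdf5', 'run_2.hdf5'], timeout=60) returns as soon as both (illustrative) files exist, or gives up after 60 seconds.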
Returns True on success, False on timeout.""" filelist = set(filelist) for i in yield_when_exists(filelist, timeout): pass return not filelist def wait_for_file(file, timeout=None): return wait_for_files([file], timeout=timeout) def space_to_edf(space, filename): from PyMca import EdfFile header = {} for a in space.axes: header[str(a.label)] = '{0} {1} {2}'.format(a.min, a.max, a.res) edf = EdfFile.EdfFile(filename) edf.WriteImage(header, space.get_masked().filled(0), DataType="Float") def space_to_txt(space, filename): data = [coord.flatten() for coord in space.get_grid()] data.append(space.get_masked().filled(0).flatten()) data = numpy.array(data).T with open(filename, 'w') as fp: fp.write('\t'.join(ax.label for ax in space.axes)) fp.write('\tintensity\n') numpy.savetxt(fp, data, fmt='%.6g', delimiter='\t') @contextlib.contextmanager def open_h5py(fn, mode): if isinstance(fn, h5py._hl.group.Group): yield fn else: with h5py.File(fn, mode) as fp: if mode == 'w': fp.create_group('binoculars') yield fp['binoculars'] if mode == 'r': if 'binoculars' in fp: yield fp['binoculars'] else: yield fp ### VARIOUS def uniqid(): return '{0:08x}'.format(random.randint(0, 2**32-1)) def grouper(iterable, n): while True: chunk = list(itertools.islice(iterable, n)) if not chunk: break yield chunk _python_executable = None def register_python_executable(scriptname): global _python_executable _python_executable = sys.executable, scriptname def get_python_executable(): return _python_executable def chunk_slicer(count, chunksize): """yields slice() objects that split an array of length 'count' into equal sized chunks of at most 'chunksize'""" chunkcount = int(numpy.ceil(float(count) / chunksize)) realchunksize = int(numpy.ceil(float(count) / chunkcount)) for i in range(chunkcount): yield slice(i*realchunksize, min(count, (i+1)*realchunksize)) def cluster_jobs(jobs, target_weight): jobs = sorted(jobs, key=lambda job: job.weight) # we cannot split jobs here, so just yield away all jobs that are overweight or just right while jobs and jobs[-1].weight >= target_weight: yield [jobs.pop()] while jobs: cluster = [jobs.pop()] # take the biggest remaining job size = cluster[0].weight for i in range(len(jobs)-1, -1, -1): # and exhaustively search for all jobs that can accompany it (biggest first) if size + jobs[i].weight <= target_weight: size += jobs[i].weight cluster.append(jobs.pop(i)) yield cluster def cluster_jobs2(jobs, target_weight): """Taking the first n jobs that together add up to target_weight. Here as opposed to cluster_jobs the total number of jobs does not have to be known beforehand """ jobslist = [] for job in jobs: jobslist.append(job) if sum(j.weight for j in jobslist) >= target_weight: yield jobslist[:] jobslist = [] if len(jobslist) > 0: # yield the remainder of the jobs yield jobslist[:] def loop_delayer(delay): """Delay a loop such that it runs at most once every 'delay' seconds. 
Usage example: delay = loop_delayer(5) while some_condition: next(delay) do_other_tasks """ def generator(): polltime = 0 while 1: diff = time.time() - polltime if diff < delay: time.sleep(delay - diff) polltime = time.time() yield return generator() def transformation_from_expressions(space, exprs): def transformation(*coords): ns = dict((i, getattr(numpy, i)) for i in dir(numpy)) ns.update(**dict((ax.label, coord) for ax, coord in zip(space.axes, coords))) return tuple(eval(expr, ns) for expr in exprs) return transformation def format_bytes(bytes): units = 'kB', 'MB', 'GB', 'TB' exp = min(max(int(numpy.log(bytes) / numpy.log(1024.)), 1), 4) return '{0:.1f} {1}'.format(bytes / 1024**exp, units[exp-1]) ### GZIP PICKLING (zpi) # handle old zpi's def _pickle_translate(module, name): if module in ('__main__', 'ivoxoar.space') and name in ('Space', 'Axis'): return 'BINoculars.space', name return module, name if inspect.isbuiltin(pickle.Unpickler): # real cPickle: cannot subclass def _find_global(module, name): module, name = _pickle_translate(module, name) __import__(module) return getattr(sys.modules[module], name) def pickle_load(fileobj): unpickler = pickle.Unpickler(fileobj) unpickler.find_global = _find_global return unpickler.load() else: # pure python implementation class _Unpickler(pickle.Unpickler): def find_class(self, module, name): module, name = _pickle_translate(module, name) return pickle.Unpickler.find_class(self, module, name) def pickle_load(fileobj): unpickler = _Unpickler(fileobj) return unpickler.load() @contextlib.contextmanager def atomic_write(filename): """Atomically write data into 'filename' using a temporary file and os.rename() Rename on success, clean up on failure (any exception). Example: with atomic_write(filename) as tmpfile with open(tmpfile, 'w') as fp: fp.write(...) """ if isinstance(filename, h5py._hl.group.Group): yield filename else: tmpfile = '{0}-{1}.tmp'.format(os.path.splitext(filename)[0], uniqid()) try: yield tmpfile except: raise else: best_effort_atomic_rename(tmpfile, filename) finally: if os.path.exists(tmpfile): os.remove(tmpfile) def zpi_save(obj, filename): with atomic_write(filename) as tmpfile: fp = gzip.open(tmpfile, 'wb') try: pickle.dump(obj, fp, pickle.HIGHEST_PROTOCOL) finally: fp.close() def zpi_load(filename): if hasattr(filename, 'read'): fp = gzip.GzipFile(filename.name, fileobj=filename) else: fp = gzip.open(filename, 'rb') try: return pickle_load(fp) finally: fp.close() def serialize(space, command): # first 48 bytes contain length of the message, whereby the first 8 give the length of the command, the second 8 the length of the configfile etc.. 
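    # Concretely: struct's 'QQQQQQ' format packs six unsigned 64-bit integers (48 bytes)
    # holding the byte lengths of the command, config, metadata, axes, photons and
    # contributions blocks, in that order. The header is first written with placeholder
    # zeros and overwritten with the real lengths once all parts have been appended
    # to the buffer.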
message = io.StringIO() message.write(struct.pack('QQQQQQ', 0, 0, 0, 0, 0, 0)) message.write(command) commandlength = message.len - 48 message.write(space.config.serialize()) configlength = message.len - commandlength - 48 message.write(space.metadata.serialize()) metalength = message.len - configlength - commandlength - 48 numpy.save(message, space.axes.toarray()) arraylength = message.len - metalength - configlength - commandlength - 48 numpy.save(message, space.photons) photonlength = message.len - arraylength - metalength - configlength - commandlength - 48 numpy.save(message, space.contributions) contributionlength = message.len - photonlength - arraylength - metalength - configlength - commandlength - 48 message.seek(0) message.write(struct.pack('QQQQQQ', commandlength, configlength, metalength, arraylength, photonlength, contributionlength)) message.seek(0) return message def packet_slicer(length, size=1024): # limit the communication to 1024 bytes while length > size: length -= size yield size yield length def socket_send(ip, port, mssg): try: mssglengths = struct.unpack('QQQQQQ', mssg.read(48)) # the lengths of all the components mssg.seek(0) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((ip, port)) sock.send(mssg.read(48)) for l in mssglengths: for packet in packet_slicer(l): sock.send(mssg.read(packet)) sock.close() except socket.error: # in case of failure to send. The data will be saved anyway so any loss of communication unfortunate but not critical pass def socket_recieve(RequestHandler): # pass one the handler to deal with incoming data def get_msg(length): msg = io.StringIO() for packet in packet_slicer(length): p = RequestHandler.request.recv(packet, socket.MSG_WAITALL) # wait for full mssg msg.write(p) if msg.len != length: raise errors.CommunicationError('recieved message is too short. expected length {0}, recieved length {1}'.format(length, msg.len)) msg.seek(0) return msg command, config, metadata, axes, photons, contributions = tuple(get_msg(msglength) for msglength in struct.unpack('QQQQQQ', RequestHandler.request.recv(48, socket.MSG_WAITALL))) return command.read(), config.read(), metadata.read(), numpy.load(axes), numpy.load(photons), numpy.load(contributions) binoculars-0.0.4/debian/000077500000000000000000000000001343276063200151045ustar00rootroot00000000000000binoculars-0.0.4/debian/changelog000066400000000000000000000011471343276063200167610ustar00rootroot00000000000000binoculars (0.0.3-2) UNRELEASED; urgency=medium * Imported upstream v0.0.4 -- System User Mon, 18 Feb 2019 15:00:45 +0100 binoculars (0.0.3-1) unstable; urgency=medium * Imported upstream v0.0.3. * d/watch: Updated to target Github release tags. * d/control: Homepage point to Github.com/picca/binoculars. 
-- Picca Frédéric-Emmanuel Fri, 07 Dec 2018 11:55:41 +0100 binoculars (0.0.2-1) unstable; urgency=medium * Initial release (Closes: #910077) -- Picca Frédéric-Emmanuel Wed, 25 Nov 2015 14:25:10 +0200 binoculars-0.0.4/debian/compat000066400000000000000000000000021343276063200163020ustar00rootroot000000000000009 binoculars-0.0.4/debian/control000066400000000000000000000072001343276063200165060ustar00rootroot00000000000000Source: binoculars Maintainer: Debian Science Maintainers Uploaders: Picca Frédéric-Emmanuel Section: science Priority: optional Build-Depends: debhelper (>= 9), dh-python, gir1.2-hkl-5.0, python3-all, python3-numpy, python3-pyfai, python3-setuptools, python3-sphinx, python3-tables, Standards-Version: 4.1.2 Vcs-Browser: https://salsa.debian.org/science-team/binoculars Vcs-Git: https://salsa.debian.org/science-team/binoculars.git Homepage: https://github.com/picca/binoculars Package: binoculars Architecture: all Section: python Depends: python3-binoculars (>= ${source:Version}), ${misc:Depends}, ${python3:Depends} Description: Surface X-ray diffraction 2D detector data reduction BINoculars is a tool for data reduction and analysis of large sets of surface diffraction data that have been acquired with a two-dimensional X-ray detector. The intensity of each pixel of a two-dimensional detector is projected onto a three-dimensional grid in reciprocal-lattice coordinates using a binning algorithm. This allows for fast acquisition and processing of high-resolution data sets and results in a significant reduction of the size of the data set. The subsequent analysis then proceeds in reciprocal space. It has evolved from the specific needs of the ID03 beamline at the ESRF, but it has a modular design and can be easily adjusted and extended to work with data from other beamlines or from other measurement techniques. Package: python3-binoculars Architecture: all Section: python Depends: gir1.2-hkl-5.0, ${misc:Depends}, ${python3:Depends} Suggests: python3-xrayutilities Description: Surface X-ray diffraction 2D detector data reduction - Python3 BINoculars is a tool for data reduction and analysis of large sets of surface diffraction data that have been acquired with a two-dimensional X-ray detector. The intensity of each pixel of a two-dimensional detector is projected onto a three-dimensional grid in reciprocal-lattice coordinates using a binning algorithm. This allows for fast acquisition and processing of high-resolution data sets and results in a significant reduction of the size of the data set. The subsequent analysis then proceeds in reciprocal space. It has evolved from the specific needs of the ID03 beamline at the ESRF, but it has a modular design and can be easily adjusted and extended to work with data from other beamlines or from other measurement techniques. . This is the Python 3 version of the package. Package: binoculars-doc Architecture: all Section: doc Depends: ${misc:Depends}, ${sphinxdoc:Depends} Built-Using: ${sphinxdoc:Built-Using} Description: Surface X-ray diffraction 2D detector data reduction - Documentation BINoculars is a tool for data reduction and analysis of large sets of surface diffraction data that have been acquired with a two-dimensional X-ray detector. The intensity of each pixel of a two-dimensional detector is projected onto a three-dimensional grid in reciprocal-lattice coordinates using a binning algorithm. 
This allows for fast acquisition and processing of high-resolution data sets and results in a significant reduction of the size of the data set. The subsequent analysis then proceeds in reciprocal space. It has evolved from the specific needs of the ID03 beamline at the ESRF, but it has a modular design and can be easily adjusted and extended to work with data from other beamlines or from other measurement techniques. . This is the common documentation package. binoculars-0.0.4/debian/copyright000066400000000000000000000026141343276063200170420ustar00rootroot00000000000000Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: binoculars Source: https://github.com/id03/binoculars/releases Files: * Copyright: 2012-2015 European Synchrotron Radiation Facility Willem Onderwaater Sander Roobol 2015-2018 Synchrotron SOLEIL Frédéric-Emmanuel Picca License: GPL-3.0+ Files: debian/* Copyright: 2015 Picca Frédéric-Emmanuel License: GPL-3.0+ License: GPL-3.0+ This package is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. . This package is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. . You should have received a copy of the GNU General Public License along with this program. If not, see . On Debian systems, the complete text of the GNU General Public License version 3 can be found in "/usr/share/common-licenses/GPL-3". License: public-domain You can use this free for any purpose. It's in the public domain. It has no warranty binoculars-0.0.4/debian/gbp.conf000066400000000000000000000000401343276063200165150ustar00rootroot00000000000000[DEFAULT] debian-branch = masterbinoculars-0.0.4/debian/py3dist-overrides000066400000000000000000000000241343276063200204220ustar00rootroot00000000000000pyqt5 python3-pyqt5 binoculars-0.0.4/debian/rules000077500000000000000000000011321343276063200161610ustar00rootroot00000000000000#!/usr/bin/make -f export DH_VERBOSE=1 export PYBUILD_NAME=binoculars export PYBUILD_AFTER_INSTALL=rm -rf {destdir}/usr/bin/ %: dh $@ --with python3,sphinxdoc --buildsystem=pybuild override_dh_install: dh_numpy3 dh_install # install scripts into binoculars python3 setup.py install_scripts -d debian/binoculars/usr/bin override_dh_sphinxdoc: ifeq (,$(findstring nodocs, $(DEB_BUILD_OPTIONS))) PYTHONPATH=. http_proxy='127.0.0.1:9' sphinx-build -N -bhtml doc/source build/html # HTML generator dh_installdocs -p binoculars-doc "build/html" dh_sphinxdoc -O--buildsystem=pybuild endif binoculars-0.0.4/debian/watch000066400000000000000000000002621343276063200161350ustar00rootroot00000000000000version=4 opts="filenamemangle=s%(?:.*?)?v?(\d[\d.]*)\.tar\.gz%binoculars-$1.tar.gz%" \ https://github.com/picca/binoculars/tags \ (?:.*?/)?v?(\d[\d.]*)\.tar\.gz debian uupdatebinoculars-0.0.4/doc/000077500000000000000000000000001343276063200144275ustar00rootroot00000000000000binoculars-0.0.4/doc/Makefile000066400000000000000000000152031343276063200160700ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. 
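# For example: "make html BUILDDIR=_build" or "make latex PAPER=a4".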
SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." 
qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/binoculars.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/binoculars.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/binoculars" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/binoculars" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
binoculars-0.0.4/doc/source/000077500000000000000000000000001343276063200157275ustar00rootroot00000000000000binoculars-0.0.4/doc/source/api/000077500000000000000000000000001343276063200165005ustar00rootroot00000000000000binoculars-0.0.4/doc/source/api/binoculars.backends.rst000066400000000000000000000020251343276063200231430ustar00rootroot00000000000000binoculars.backends package =========================== Submodules ---------- binoculars.backends.bm25 module ------------------------------- .. automodule:: binoculars.backends.bm25 :members: :undoc-members: :show-inheritance: binoculars.backends.example module ---------------------------------- .. automodule:: binoculars.backends.example :members: :undoc-members: :show-inheritance: binoculars.backends.id03 module ------------------------------- .. automodule:: binoculars.backends.id03 :members: :undoc-members: :show-inheritance: binoculars.backends.id03_xu module ---------------------------------- .. automodule:: binoculars.backends.id03_xu :members: :undoc-members: :show-inheritance: binoculars.backends.sixs module ------------------------------- .. automodule:: binoculars.backends.sixs :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: binoculars.backends :members: :undoc-members: :show-inheritance: binoculars-0.0.4/doc/source/api/binoculars.rst000066400000000000000000000025351343276063200214000ustar00rootroot00000000000000binoculars package ================== Subpackages ----------- .. toctree:: binoculars.backends Submodules ---------- binoculars.backend module ------------------------- .. automodule:: binoculars.backend :members: :undoc-members: :show-inheritance: binoculars.dispatcher module ---------------------------- .. automodule:: binoculars.dispatcher :members: :undoc-members: :show-inheritance: binoculars.errors module ------------------------ .. automodule:: binoculars.errors :members: :undoc-members: :show-inheritance: binoculars.fit module --------------------- .. automodule:: binoculars.fit :members: :undoc-members: :show-inheritance: binoculars.main module ---------------------- .. automodule:: binoculars.main :members: :undoc-members: :show-inheritance: binoculars.plot module ---------------------- .. automodule:: binoculars.plot :members: :undoc-members: :show-inheritance: binoculars.space module ----------------------- .. automodule:: binoculars.space :members: :undoc-members: :show-inheritance: binoculars.util module ---------------------- .. automodule:: binoculars.util :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: binoculars :members: :undoc-members: :show-inheritance: binoculars-0.0.4/doc/source/api/modules.rst000066400000000000000000000001021343276063200206730ustar00rootroot00000000000000. = .. toctree:: :maxdepth: 4 binoculars setup test binoculars-0.0.4/doc/source/api/setup.rst000066400000000000000000000001521343276063200203700ustar00rootroot00000000000000setup module ============ .. automodule:: setup :members: :undoc-members: :show-inheritance: binoculars-0.0.4/doc/source/api/test.rst000066400000000000000000000010201343276063200202020ustar00rootroot00000000000000test package ============ Submodules ---------- test.cfg module --------------- .. automodule:: test.cfg :members: :undoc-members: :show-inheritance: test.id03 module ---------------- .. automodule:: test.id03 :members: :undoc-members: :show-inheritance: test.metadata module -------------------- .. 
automodule:: test.metadata :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: test :members: :undoc-members: :show-inheritance: binoculars-0.0.4/doc/source/conf.py000066400000000000000000000202511343276063200172260ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # binoculars documentation build configuration file, created by # sphinx-quickstart on Wed Nov 25 15:03:57 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.join(os.path.abspath('.'), os.pardir, os.pardir)) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'binoculars' copyright = u'2015, Willem Onderwaater, Sander Roobol\\' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.0.1' # The full version, including alpha/beta/rc tags. release = '0.0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'binocularsdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'binoculars.tex', u'binoculars Documentation', u'Willem Onderwaater, Sander Roobol\\textbackslash{}', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. 
#latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'binoculars', u'binoculars Documentation', [u'Willem Onderwaater, Sander Roobol\\'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'binoculars', u'binoculars Documentation', u'Willem Onderwaater, Sander Roobol\\', 'binoculars', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False binoculars-0.0.4/doc/source/index.rst000066400000000000000000000010361343276063200175700ustar00rootroot00000000000000.. binoculars documentation master file, created by sphinx-quickstart on Wed Nov 25 15:03:57 2015. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to binoculars's documentation! ====================================== Contents: .. toctree:: :maxdepth: 2 readme api/binoculars api/binoculars.backends api/test api/modules Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` binoculars-0.0.4/doc/source/readme.rst000066400000000000000000000037041343276063200177220ustar00rootroot00000000000000BINoculars ========== BINoculars is a tool for data reduction and analysis of large sets of surface diffraction data that have been acquired with a 2D X-ray detector. The intensity of each pixel of a 2D-detector is projected onto a 3-dimensional grid in reciprocal lattice coordinates using a binning algorithm. This allows for fast acquisition and processing of high-resolution datasets and results in a significant reduction of the size of the dataset. The subsequent analysis then proceeds in reciprocal space. It has evolved from the specific needs of the ID03 beamline at the ESRF, but it has a modular design and can be easily adjusted and extended to work with data from other beamlines or from other measurement techniques. This work has been [published](http://dx.doi.org/10.1107/S1600576715009607) with open access in the Journal of Applied Crystallography Volume 48, Part 4 (August 2015) ## Installation Grab the [latest sourcecode as zip](https://github.com/id03/binoculars/archive/master.zip) or clone the Git repository. Run `binoculars.py`, `fitaid.py`, `gui.py` or `processgui.py` directly from the command line. ## Usage The [BINoculars wiki](https://github.com/id03/binoculars/wiki) contains a detailed tutorial to get started. 
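As a quick sanity check of a processed file, a short Python session can be used (a sketch; it assumes a file `test.hdf5` produced by an earlier processing run):

    import binoculars

    space = binoculars.load('test.hdf5')      # load a processed space
    print(space)                              # show its axes and ranges
    projected = space.project(0)              # integrate out the first axis
    binoculars.save('test_projected.hdf5', projected)
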
## Scripting If you want more complex operations than offered by the command line or GUI tools, you can manipulate BINoculars data directly from Python. Some examples with detailed comments can be found in the [repository](https://github.com/id03/binoculars/tree/master/examples/scripts). The API documentation on the `BINoculars` and `BINoculars.space` modules can be accessed via pydoc, e.g. run `pydoc -w BINoculars BINoculars.space` to generate HTML files. ## Extending BINoculars If you want to use BINoculars with your beamline, you need to write a `backend` module. The code contains an [example implementation](https://github.com/id03/binoculars/blob/master/BINoculars/backends/example.py) with many hints and comments. binoculars-0.0.4/examples/000077500000000000000000000000001343276063200155005ustar00rootroot00000000000000binoculars-0.0.4/examples/configs/000077500000000000000000000000001343276063200171305ustar00rootroot00000000000000binoculars-0.0.4/examples/configs/example_config_bm25000066400000000000000000000024561343276063200226670ustar00rootroot00000000000000### To process measurements from BM25 this configuration is needed # typically one would execute: # python2 binoculars/binoculars.py process config_bm25 ### the DISPATCHER is responsible for job management [dispatcher] type = local # SingleCore # local # SingleCore can be aborted by Ctrl+c # ncores = 4 # optionally, specify number of cores (autodetect by default) # specify destination file using scan numbers destination=/data/visitor/XYZ/spaces/sample_{first}.hdf5 overwrite = true ### choose an appropriate INPUT class and specify custom options [input] type = bm25:eh2scd # refers to class EH2SCD in BINoculars/backends/bm25.py imagefile = /data/visitor/XYZ/bm25/sample_{scannr:03d}_*.edf ## approximate number of images per job target_weight = 50 # technical details for this particular input class centralpixel = 1892.0,1357.5 xmask = 1600-2200 # full size = 0-3824 ymask = 700-1300 # full size = 0-1911 sddy_offset= -1288.5 # sample detector distance Y offset sddx_offset= 0 # detector X offset sddz_offset= 0 # detector Z offset ccdth_offset= -0.05 pixelsize=0.06552, 0.06552 # in microns ### choose PROJECTION plus resolution [projection] type = bm25:qprojection # refers to QProjection in BINoculars/backends/bm25.py resolution = 0.004,0.001,0.002 # or just give one number for all binoculars-0.0.4/examples/configs/example_config_example000066400000000000000000000015741343276063200235550ustar00rootroot00000000000000### the DISPATCHER is responsible for job management [dispatcher] type = local # run locally ncores = 1 # optionally, specify number of cores (autodetect by default) # specificy destination file using scan numbers destination= test_{first}.hdf5 overwrite = true ### choose an appropriate INPUT class and specify custom options [input] type = example:input # refers to class Input in BINoculars/backends/example.py ## approximate number of images per job, only useful when running on the oar cluster target_weight = 4000 # technical details for this particular input class wavelength = 0.5 centralpixel = 50,50 sdd=636 #sample detector distance pixelsize=0.055, 0.055 ### choose PROJECTION plus resolution [projection] type = example:qprojection # refers to qprojection in BINoculars/backends/example.py ## for L-scans (previous values) resolution = 0.01 # or just give 1 number for all binoculars-0.0.4/examples/configs/example_config_id03000066400000000000000000000026511343276063200226560ustar00rootroot00000000000000### the DISPATCHER is 
responsible for job management [dispatcher] type = local # run locally #ncores = 4 # optionally, specify number of cores (autodetect by default) # to use the OAR cluster: #type = oar #tmpdir = /some/globally/available/path #oarsub_options = walltime=0:15 # optionally, tweak oarsub parameters #executable = python /data/id03/inhouse/binoculars/binoculars.py # optionally, override default location of python and/or BINoculars installation # specificy destination file using scan numbers destination = demo_{first}-{last}.hdf5 overwrite = true # or, by default: numbered files in the form output_###.hdf5: # destination = output.hdf5 # overwrite = false ### choose an appropriate INPUT class and specify custom options [input] type = id03:eh1 # refers to class EH1 in BINoculars/backends/id03.py specfile = /path/to/data/file.spec imagefolder = /path/to/data/images/{rUCCD[0]}/ ## approximate number of images per job, only useful when running on the oar cluster target_weight = 4000 # technical yadayada for this particular input class centralpixel = 40, 255 # x,y sdd = 1050 # sample to detector distance (mm) pixelsize = 0.055, 0.055 # pixel size x/y (mm) ymask = 185-253,262-400 xmask = 50-235 ### choose PROJECTION plus resolution [projection] type = id03:hklprojection # refers to HKLProjection in BINoculars/backends/id03.py resolution = 0.002, 0.002, 1 # or just give 1 number for all dimensions binoculars-0.0.4/examples/configs/example_config_id03_xrayutilities000066400000000000000000000023561343276063200256570ustar00rootroot00000000000000### To process measurements from ID03 using xrayutilities in the reciprocal space conversion this configuration is needed # typically one would execute: # python2 binoculars/binoculars.py process config_id03_xu ### the DISPATCHER is responsible for job management [dispatcher] type = local # run locally # ncores = 4 # optionally, specify number of cores (autodetect by default) # specificy destination file using scan numbers destination=/path/to/output/spaces/XYZ_xu_{first}-{last}.hdf5 overwrite = true ### choose an appropriate INPUT class and specify custom options [input] type = id03_xu:eh2 # refers to class EH2 in BINoculars/backends/id03_xu.py specfile=/DIRECTORY/hc1434/sixc_hc1434.spec imagefolder = /path/to/data/images/{rUCCD[0]} ## approximate number of images per job, only useful when running on the oar cluster target_weight = 4000 # technical details for this particular input class centralpixel = 366,328 ymask = 80-500 xmask = 182-500 sdd=636 #sample detector distance pixelsize=0.055, 0.055 ### choose PROJECTION plus resolution [projection] type = id03_xu:hklprojection # refers to HKLProjection in BINoculars/backends/id03_xu.py ## for L-scans (previous values) resolution = 0.002, 0.002, 0.0017 # or just give 1 number for all binoculars-0.0.4/examples/configs/example_config_io7000066400000000000000000000030311343276063200226060ustar00rootroot00000000000000### the DISPATCHER is responsible for job management [dispatcher] type = singlecore # run locally #ncores = 2 # optionally, specify number of cores (autodetect by default) #send_to_gui = true #host = 160.103.228.145 #port = 55294 # to use the OAR cluster: #type = oar #tmpdir = /some/globally/available/path #oarsub_options = walltime=0:15 # optionally, tweak oarsub parameters #executable = /path/to/custom/python /path/to/ivoxprocess # optionally, override default location of python and/or ivoxoar installation # specificy destination file using scan numbers destination = mesh_{first}_{last}.hdf5 overwrite = true # 
or, by default: numbered files in the form output_###.zpi: # destination = output.zpi #overwrite = false ### choose an appropriate INPUT class and specify custom options [input] type = io7:eh1 # refers to class EH1 in ivoxoar/backends/id03.py #specfile = test.spec datafilefolder = /home/willem/Documents/PhD/diamond imagefolder = /home/willem/Documents/PhD/diamond #wait_for_data = True ## approximate number of images per job, only useful when running on the oar cluster target_weight = 500 # technical yadayada for this particular input class centralpixel = 92, 215 xmask=130-330 ymask=14-165 pixelsize = 0.172, 0.172 sdd = 897 ### choose PROJECTION plus resolution [projection] #type = io7:gammadelta #resolution=0.01 type = io7:hklprojection # refers to HKLProjection in ivoxoar/backends/id03.py resolution = 0.001, 0.001, 0.001# or just give 1 number for all dimensions #limits = [:0,-1:,:], [0:,:-1,:], [:0,:-1,:], [0:,-1:,:] binoculars-0.0.4/examples/scripts/000077500000000000000000000000001343276063200171675ustar00rootroot00000000000000binoculars-0.0.4/examples/scripts/binoculars.mac000066400000000000000000000071351343276063200220200ustar00rootroot00000000000000global projection, resolution, binoculars_host, binoculars_port, configfilename binoculars_host = "160.103.228.220" binoculars_port = "58395" configfilename = "/users/onderwaa/ma2249/config_ma2249.txt" def binoculars '{ if ($# == 0){ print ("Usage: command (this works only for a number or variable, if you want to sepecify a range (140-150) use binoculars_str)") exit } local params, args n=split("$*",args) destination = (binoculars_host ":" binoculars_port) params["configfilename"] = configfilename params["command"] = $1 if (resolution != 0 ){ params["resolution"] = resolution } if (projection != 0){ params["projection"] = projection } for (i=1; i 1: print('only one space file argument is support with extractconfig -> using the first') config = binoculars.util.ConfigFile.fromfile(args.infile[0]) config.totxtfile(args.output) else: for f in args.infile: try: axes = binoculars.space.Axes.fromfile(f) except Exception as e: print(('{0}: unable to load Space: {1!r}'.format(f, e))) else: print(('{0} \n{1!r}'.format(f, axes))) if args.config: try: config = binoculars.util.ConfigFile.fromfile(f) except Exception as e: print(('{0}: unable to load util.ConfigFile: {1!r}'.format(f, e))) else: print(('{!r}'.format(config))) # CONVERT def command_convert(args): parser = argparse.ArgumentParser(prog='binoculars convert') parser.add_argument('--wait', action='store_true', help='wait for input files to appear') binoculars.util.argparse_common_arguments(parser, 'project', 'slice', 'pslice', 'rebin', 'transform', 'subtract') parser.add_argument('--read-trusted-zpi', action='store_true', help='read legacy .zpi files, ONLY FROM A TRUSTED SOURCE!') parser.add_argument('infile', help='input file, must be a .hdf5') parser.add_argument('outfile', help='output file, can be .hdf5 or .edf or .txt') args = parser.parse_args(args) if args.wait: binoculars.util.statusnl('waiting for {0} to appear'.format(args.infile)) binoculars.util.wait_for_file(args.infile) binoculars.util.statusnl('processing...') if args.infile.endswith('.zpi'): if not args.read_trusted_zpi: print('error: .zpi files are unsafe, use --read-trusted-zpi to open') sys.exit(1) space = binoculars.util.zpi_load(args.infile) else: space = binoculars.space.Space.fromfile(args.infile) ext = os.path.splitext(args.outfile)[-1] if args.subtract: space -= binoculars.space.Space.fromfile(args.subtract) space, 
info = binoculars.util.handle_ordered_operations(space, args) if ext == '.edf': binoculars.util.space_to_edf(space, args.outfile) print('saved at {0}'.format(args.outfile)) elif ext == '.txt': binoculars.util.space_to_txt(space, args.outfile) print('saved at {0}'.format(args.outfile)) elif ext == '.hdf5': space.tofile(args.outfile) print('saved at {0}'.format(args.outfile)) else: sys.stderr.write('unknown extension {0}, unable to save!\n'.format(ext)) sys.exit(1) # PLOT def command_plot(args): import matplotlib.pyplot as pyplot import binoculars.fit import binoculars.plot parser = argparse.ArgumentParser(prog='binoculars plot') parser.add_argument('infile', nargs='+') binoculars.util.argparse_common_arguments(parser, 'savepdf', 'savefile', 'clip', 'nolog', 'project', 'slice', 'pslice', 'subtract', 'rebin', 'transform') parser.add_argument('--multi', default=None, choices=('grid', 'stack')) parser.add_argument('--fit', default=None) parser.add_argument('--guess', default=None) args = parser.parse_args(args) if args.subtract: subtrspace = binoculars.space.Space.fromfile(args.subtract) subtrspace, subtrinfo = binoculars.util.handle_ordered_operations(subtrspace, args, auto3to2=True) args.nolog = True guess = [] if args.guess is not None: for n in args.guess.split(','): guess.append(float(n.replace('m', '-'))) # PLOTTING AND SIMPLEFITTING pyplot.figure(figsize=(12, 9)) plotcount = len(args.infile) plotcolumns = int(numpy.ceil(numpy.sqrt(plotcount))) plotrows = int(numpy.ceil(float(plotcount) / plotcolumns)) for i, filename in enumerate(args.infile): space = binoculars.space.Space.fromfile(filename) space, info = binoculars.util.handle_ordered_operations(space, args, auto3to2=True) fitdata = None if args.fit: fit = binoculars.fit.get_class_by_name(args.fit)(space, guess) print(fit) if fit.success: fitdata = fit.fitdata if plotcount > 1: if space.dimension == 1 and args.multi is None: args.multi = 'stack' if space.dimension == 2 and args.multi != 'grid': if args.multi is not None: sys.stderr.write('warning: stack display not supported for multi-file-plotting, falling back to grid\n') args.multi = 'grid' # elif space.dimension == 3: # not reached, project_and_slice() guarantees that elif space.dimension > 3: sys.stderr.write('error: cannot display 4 or higher dimensional data, use --project or --slice to decrease dimensionality\n') sys.exit(1) if args.subtract: space -= subtrspace basename = os.path.splitext(os.path.basename(filename))[0] if args.multi == 'grid': pyplot.subplot(plotrows, plotcolumns, i+1) binoculars.plot.plot(space, pyplot.gcf(), pyplot.gca(), label=basename, log=not args.nolog, clipping=float(args.clip), fit=fitdata) if plotcount > 1 and args.multi == 'grid': pyplot.gca().set_title(basename) if plotcount == 1: label = basename else: label = '{0} files'.format(plotcount) if args.subtract: label = '{0} (subtracted {1})'.format(label, os.path.splitext(os.path.basename(args.subtract))[0]) if plotcount > 1 and args.multi == 'stack': pyplot.legend() pyplot.suptitle('{0}, {1}'.format(label, ' '.join(info))) if args.savepdf or args.savefile: if args.savefile: pyplot.savefig(args.savefile) else: filename = '{0}_plot.pdf'.format(os.path.splitext(args.infile[0])[0]) filename = binoculars.util.find_unused_filename(filename) pyplot.savefig(filename) else: pyplot.show() # FIT def command_fit(args): import matplotlib.pyplot as pyplot import binoculars.fit import binoculars.plot parser = argparse.ArgumentParser(prog='binoculars fit') parser.add_argument('infile') parser.add_argument('axis') 
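# positional arguments: 'axis' is the label of the axis to slice the rod along; 'resolution' (added next) sets the slice width, which may not be finer than that axis' own resolution; 'func' names a fit class from binoculars.fit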
parser.add_argument('resolution') parser.add_argument('func') parser.add_argument('--follow', action='store_true', help='use the result of the previous fit as guess for the next') binoculars.util.argparse_common_arguments(parser, 'savepdf', 'savefile', 'clip', 'nolog') args = parser.parse_args(args) axes = binoculars.space.Axes.fromfile(args.infile) axindex = axes.index(args.axis) ax = axes[axindex] axlabel = ax.label if float(args.resolution) < ax.res: raise ValueError('interval {0} to low, minimum interval is {1}'.format(args.resolution, ax.res)) mi, ma = ax.min, ax.max bins = numpy.linspace(mi, ma, numpy.ceil(1 / numpy.float(args.resolution) * (ma - mi)) + 1) parameters = [] variance = [] fitlabel = [] guess = None basename = os.path.splitext(os.path.basename(args.infile))[0] if args.savepdf or args.savefile: if args.savefile: filename = binoculars.util.filename_enumerator(args.savefile) else: filename = binoculars.util.filename_enumerator('{0}_fit.pdf'.format(basename)) fitclass = binoculars.fit.get_class_by_name(args.func) for start, stop in zip(bins[:-1], bins[1:]): info = [] key = [slice(None) for i in axes] key[axindex] = slice(start, stop) newspace = binoculars.space.Space.fromfile(args.infile, key) left, right = newspace.axes[axindex].min, newspace.axes[axindex].max if newspace.dimension == axes.dimension: newspace = newspace.project(axindex) fit = fitclass(newspace, guess) paramnames = fit.parameters print(fit) if fit.success: fitlabel.append(numpy.mean([start, stop])) parameters.append(fit.result) variance.append(fit.variance) if args.follow and not fit.variance[0] == float(0): guess = fit.result else: guess = None fit = fit.fitdata else: fit = None guess = None print(guess) if args.savepdf or args.savefile: if len(newspace.get_masked().compressed()): if newspace.dimension == 1: pyplot.figure(figsize=(12, 9)) pyplot.subplot(111) binoculars.plot.plot(newspace, pyplot.gcf(), pyplot.gca(), label=basename, log=not args.nolog, clipping=float(args.clip), fit=fit) elif newspace.dimension == 2: pyplot.figure(figsize=(12, 9)) pyplot.subplot(121) binoculars.plot.plot(newspace, pyplot.gcf(), pyplot.gca(), label=basename, log=not args.nolog, clipping=float(args.clip), fit=None) pyplot.subplot(122) binoculars.plot.plot(newspace, pyplot.gcf(), pyplot.gca(), label=basename, log=not args.nolog, clipping=float(args.clip), fit=fit) info.append('sliced in {0} from {1} to {2}'.format(axlabel, left, right)) pyplot.suptitle('{0}'.format(' '.join(info))) pyplot.savefig(next(filename)) pyplot.close() parameters = numpy.vstack(n for n in parameters).T variance = numpy.vstack(n for n in variance).T pyplot.figure(figsize=(9, 4 * parameters.shape[0] + 2)) for i in range(parameters.shape[0]): pyplot.subplot(parameters.shape[0], 1, i) pyplot.plot(fitlabel, parameters[i, :]) if paramnames[i] in ['I']: pyplot.semilogy() pyplot.xlabel(paramnames[i]) pyplot.suptitle('fit summary of {0}'.format(args.infile)) if args.savepdf or args.savefile: if args.savefile: root, ext = os.path.split(args.savefile) pyplot.savefig('{0}_summary{1}'.format(root, ext)) print('saved at {0}_summary{1}'.format(root, ext)) filename = '{0}_summary{1}'.format(root, '.txt') else: pyplot.savefig('{0}_summary.pdf'.format(os.path.splitext(args.infile)[0])) print('saved at {0}_summary.pdf'.format(os.path.splitext(args.infile)[0])) filename = '{0}_summary.txt'.format(os.path.splitext(args.infile)[0]) file = open(filename, 'w') file.write('L\t') file.write('\t'.join(paramnames)) file.write('\n') for n in range(parameters.shape[1]): 
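# write the fit summary as tab-separated text: one line per slice, with the position along the fit axis followed by the fitted parameter values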
file.write('{0}\t'.format(fitlabel[n])) file.write('\t'.join(numpy.array(parameters[:, n], dtype=numpy.str))) file.write('\n') file.close() # PROCESS def command_process(args): import binoculars.main binoculars.util.register_python_executable(__file__) binoculars.main.Main.from_args(args) # start of main thread # SUBCOMMAND ARGUMENT HANDLING def usage(msg=''): print("""usage: binoculars COMMAND ... {1} available commands: convert mathematical operations & file format conversions info basic information on Space in .hdf5 file fit crystal truncation rod fitting plot 1D & 2D plotting (parts of) Space and basic fitting process data crunching / binning run binoculars COMMAND --help more info on that command """.format(sys.argv[0], msg)) sys.exit(1) if __name__ == '__main__': binoculars.space.silence_numpy_errors() subcommands = {'info': command_info, 'convert': command_convert, 'plot': command_plot, 'fit': command_fit, 'process': command_process} if len(sys.argv) < 2: usage() subcommand = sys.argv[1] if subcommand in ('-h', '--help'): usage() if subcommand not in subcommands: usage("binoculars error: unknown command '{0}'\n".format(subcommand)) subcommands[sys.argv[1]](sys.argv[2:]) binoculars-0.0.4/scripts/binoculars-fitaid000077500000000000000000001505641343276063200207110ustar00rootroot00000000000000#!/usr/bin/env python import sys import os.path import itertools import h5py import matplotlib.figure import matplotlib.image import numpy from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT from matplotlib.pyplot import Rectangle from PyQt5.Qt import (Qt) from PyQt5.QtCore import (pyqtSignal) from PyQt5.QtWidgets import (QAction, QApplication, QSlider, QMenuBar, QTabWidget, QFileDialog, QStatusBar, QMessageBox, QRadioButton, QButtonGroup, QCheckBox, QPushButton, QHBoxLayout, QVBoxLayout, QSplitter, QTableWidgetItem, QTableWidget, QLabel, QLineEdit, QMainWindow, QWidget, QComboBox, QProgressDialog, QDoubleSpinBox) from scipy.interpolate import griddata def set_src(): import sys import os.path as osp dirpath = osp.join(osp.dirname(osp.abspath(__file__)), osp.pardir) sys.path.insert(0, osp.abspath(dirpath)) try: import binoculars.main import binoculars.space import binoculars.plot import binoculars.fit import binoculars.util except ImportError: # try to use code from src distribution set_src() import binoculars.main import binoculars.space import binoculars.plot import binoculars.fit import binoculars.util class Window(QMainWindow): def __init__(self, parent=None): super(Window, self).__init__(parent) newproject = QAction("New project", self) newproject.triggered.connect(self.newproject) loadproject = QAction("Open project", self) loadproject.triggered.connect(self.loadproject) addspace = QAction("Import space", self) addspace.triggered.connect(self.add_to_project) menu_bar = QMenuBar() file = menu_bar.addMenu("&File") file.addAction(newproject) file.addAction(loadproject) file.addAction(addspace) self.setMenuBar(menu_bar) self.statusbar = QStatusBar() self.tab_widget = QTabWidget(self) self.tab_widget.setTabsClosable(True) self.tab_widget.tabCloseRequested.connect(self.tab_widget.removeTab) self.setCentralWidget(self.tab_widget) self.setMenuBar(menu_bar) self.setStatusBar(self.statusbar) def newproject(self): dialog = QFileDialog(self, "project filename"); dialog.setNameFilters(['binoculars fit file (*.fit)']); dialog.setDefaultSuffix('fit'); dialog.setFileMode(QFileDialog.AnyFile); dialog.setAcceptMode(QFileDialog.AcceptSave); if not dialog.exec_(): return fname 
= dialog.selectedFiles()[0] if not fname: return try: widget = TopWidget(str(fname), parent=self) self.tab_widget.addTab(widget, short_filename(str(fname))) self.tab_widget.setCurrentWidget(widget) except Exception as e: QMessageBox.critical(self, 'New project', 'Unable to save project to {}: {}'.format(fname, e)) def loadproject(self, filename=None): if not filename: dialog = QFileDialog(self, "Load project"); dialog.setNameFilters(['binoculars fit file (*.fit)']); dialog.setFileMode(QFileDialog.ExistingFiles); dialog.setAcceptMode(QFileDialog.AcceptOpen); if not dialog.exec_(): return fname = dialog.selectedFiles()[0] if not fname: return try: widget = TopWidget(str(fname), parent=self) self.tab_widget.addTab(widget, short_filename(str(fname))) self.tab_widget.setCurrentWidget(widget) except Exception as e: QMessageBox.critical(self, 'Load project', 'Unable to load project from {}: {}'.format(fname, e)) else: widget = TopWidget(str(fname), parent=self) self.tab_widget.addTab(widget, 'fname') self.tab_widget.setCurrentWidget(widget) def add_to_project(self, filename=None): if self.tab_widget.count() == 0: QMessageBox.warning(self, 'Warning', 'First select a file to store data') self.newproject() if not filename: dialog = QFileDialog(self, "Import spaces"); dialog.setNameFilters(['binoculars space file (*.hdf5)']); dialog.setFileMode(QFileDialog.ExistingFiles); dialog.setAcceptMode(QFileDialog.AcceptOpen); if not dialog.exec_(): return fname = dialog.selectedFiles() if not fname: return for name in fname: try: widget = self.tab_widget.currentWidget() widget.addspace(str(name)) except Exception as e: QMessageBox.critical(self, 'Import spaces', 'Unable to import space {}: {}'.format(fname, e)) else: widget = self.tab_widget.currentWidget() widget.addspace(filename) class TopWidget(QWidget): def __init__(self, filename, parent=None): super(TopWidget, self).__init__(parent) hbox = QHBoxLayout() vbox = QVBoxLayout() minihbox = QHBoxLayout() minihbox2 = QHBoxLayout() self.database = FitData(filename) self.table = TableWidget(self.database) self.nav = ButtonedSlider() self.nav.slice_index.connect(self.index_change) self.table.trigger.connect(self.active_change) self.table.check_changed.connect(self.refresh_plot) self.tab_widget = QTabWidget() self.fitwidget = FitWidget(self.database, self) self.integratewidget = IntegrateWidget(self.database, self) self.plotwidget = OverviewWidget(self.database, self) self.peakwidget = PeakWidget(self.database, self) self.tab_widget.addTab(self.fitwidget, 'Fit') self.tab_widget.addTab(self.integratewidget, 'Integrate') self.tab_widget.addTab(self.plotwidget, 'plot') self.tab_widget.addTab(self.peakwidget, 'Peaktracker') self.emptywidget = QWidget() self.emptywidget.setLayout(vbox) vbox.addWidget(self.table) vbox.addWidget(self.nav) self.functions = list() self.function_box = QComboBox() for function in dir(binoculars.fit): cls = getattr(binoculars.fit, function) if isinstance(cls, type) and issubclass(cls, binoculars.fit.PeakFitBase): self.functions.append(cls) self.function_box.addItem(function) self.function_box.setCurrentIndex(self.function_box.findText('PolarLorentzian2D')) vbox.addWidget(self.function_box) vbox.addLayout(minihbox) vbox.addLayout(minihbox2) self.all_button = QPushButton('fit all') self.rod_button = QPushButton('fit rod') self.slice_button = QPushButton('fit slice') self.all_button.clicked.connect(self.fit_all) self.rod_button.clicked.connect(self.fit_rod) self.slice_button.clicked.connect(self.fit_slice) minihbox.addWidget(self.all_button) 
minihbox.addWidget(self.rod_button) minihbox.addWidget(self.slice_button) self.allint_button = QPushButton('int all') self.rodint_button = QPushButton('int rod') self.sliceint_button = QPushButton('int slice') self.allint_button.clicked.connect(self.int_all) self.rodint_button.clicked.connect(self.int_rod) self.sliceint_button.clicked.connect(self.int_slice) minihbox2.addWidget(self.allint_button) minihbox2.addWidget(self.rodint_button) minihbox2.addWidget(self.sliceint_button) splitter = QSplitter(Qt.Horizontal) splitter.addWidget(self.emptywidget) splitter.addWidget(self.tab_widget) self.tab_widget.currentChanged.connect(self.tab_change) hbox.addWidget(splitter) self.setLayout(hbox) def tab_change(self, index): if index == 2: self.refresh_plot() def addspace(self, filename=None): if filename == None: filename = str(QFileDialog.getOpenFileName(self, 'Open Project', '.', '*.hdf5')) self.table.addspace(filename) def active_change(self): rodkey, axis, resolution = self.table.currentkey() newdatabase = RodData(self.database.filename, rodkey, axis, resolution) self.integratewidget.database = newdatabase self.peakwidget.database = newdatabase self.integratewidget.set_axis() self.peakwidget.set_axis() self.fitwidget.database = newdatabase self.nav.set_length(newdatabase.rodlength()) index = newdatabase.load('index') if index == None: index = 0 self.nav.set_index(index) self.index_change(index) def index_change(self, index): if index == None: index = 0 self.fitwidget.database.save('index', self.nav.index()) self.fitwidget.plot(index) self.integratewidget.plot(index) def refresh_plot(self): self.plotwidget.refresh(list(RodData(self.database.filename, rodkey, axis, resolution) for rodkey, axis, resolution in self.table.checked())) @property def fitclass(self): return self.functions[self.function_box.currentIndex()] def fit_slice(self): index = self.nav.index() space = self.fitwidget.database.space_from_index(index) self.fitwidget.fit(index, space, self.fitclass) self.fit_loc(self.fitwidget.database) self.fitwidget.plot(index) def fit_rod(self): def function(index, space): self.fitwidget.fit(index, space, self.fitclass) self.progressbox(self.fitwidget.database.rodkey, function, enumerate(self.fitwidget.database), self.fitwidget.database.rodlength()) self.fit_loc(self.fitwidget.database) self.fitwidget.plot() def fit_all(self): def function(index, space): self.fitwidget.fit(index, space, self.fitclass) for rodkey, axis, resolution in self.table.checked(): self.fitwidget.database = RodData(self.database.filename, rodkey, axis, resolution) self.progressbox(self.fitwidget.database.rodkey, function, enumerate(self.fitwidget.database), self.fitwidget.database.rodlength()) self.fit_loc(self.fitwidget.database) self.fitwidget.plot() def int_slice(self): index = self.nav.index() space = self.fitwidget.database.space_from_index(index) self.integratewidget.integrate(index, space) self.integratewidget.plot(index) def int_rod(self): self.progressbox(self.integratewidget.database.rodkey, self.integratewidget.integrate, enumerate(self.integratewidget.database), self.integratewidget.database.rodlength()) self.integratewidget.plot() def int_all(self): for rodkey, axis, resolution in self.table.checked(): self.integratewidget.database = RodData(self.database.filename, rodkey, axis, resolution) self.progressbox(self.integratewidget.database.rodkey, self.integratewidget.integrate, enumerate(self.integratewidget.database), self.integratewidget.database.rodlength()) self.integratewidget.plot() def fit_loc(self, 
database): deg = 2 for param in database.all_attrkeys(): if param.startswith('loc'): x, y = database.all_from_key(param) x, yvar = database.all_from_key('var_{0}'.format(param)) cx = x[numpy.invert(y.mask)] y = y.compressed() yvar = yvar.compressed() w = numpy.log(1 / yvar) w[w == numpy.inf] = 0 w = numpy.nan_to_num(w) w[w < 0] = 0 w[w < numpy.median(w)] = 0 if len(x) > 0: c = numpy.polynomial.polynomial.polyfit(cx, y, deg, w=w) newy = numpy.polynomial.polynomial.polyval(x, c) for index, newval in enumerate(newy): database.save_sliceattr(index, 'guessloc{0}'.format(param.lstrip('loc')), newval) def progressbox(self, rodkey, function, iterator, length): pd = QProgressDialog('Processing {0}'.format(rodkey), 'Cancel', 0, length) pd.setWindowModality(Qt.WindowModal) pd.show() def progress(index, item): pd.setValue(index) if pd.wasCanceled(): raise KeyboardInterrupt QApplication.processEvents() function(*item) for index, item in enumerate(iterator): progress(index, item) pd.close() class TableWidget(QWidget): trigger = pyqtSignal() check_changed = pyqtSignal() def __init__(self, database, parent=None): super(TableWidget, self).__init__(parent) hbox = QHBoxLayout() self.database = database self.activeindex = 0 self.table = QTableWidget(0, 5) self.table.setHorizontalHeaderLabels(['', 'rod', 'axis', 'res', 'remove']) self.table.cellClicked.connect(self.setlength) for index, width in enumerate([25, 150, 40, 50, 70]): self.table.setColumnWidth(index, width) for filename, rodkey in zip(database.filelist, database.rods()): self.addspace(filename, rodkey) hbox.addWidget(self.table) self.setLayout(hbox) def addspace(self, filename, rodkey=None): def remove_callback(rodkey): return lambda: self.remove(rodkey) def activechange_callback(index): return lambda: self.setlength(index, 1) if rodkey == None: rodkey = short_filename(filename) if rodkey in self.database.rods(): newkey = find_unused_rodkey(rodkey, self.database.rods()) self.database.copy(rodkey, newkey) rodkey = newkey old_axis, old_resolution = self.database.load(rodkey, 'axis'), self.database.load(rodkey, 'resolution') self.database.create_rod(rodkey, filename) index = self.table.rowCount() self.table.insertRow(index) axes = binoculars.space.Axes.fromfile(filename) checkboxwidget = QCheckBox() checkboxwidget.rodkey = rodkey checkboxwidget.setChecked(0) self.table.setCellWidget(index, 0, checkboxwidget) checkboxwidget.clicked.connect(self.check_changed) item = QTableWidgetItem(rodkey) self.table.setItem(index, 1, item) axis = QComboBox() for ax in axes: axis.addItem(ax.label) self.table.setCellWidget(index, 2, axis) if not old_axis == None: self.table.cellWidget(index, 2).setCurrentIndex(axes.index(old_axis)) elif index > 0: self.table.cellWidget(0, 2).setCurrentIndex(self.table.cellWidget(0, 2).currentIndex()) resolution = QLineEdit() if not old_resolution == None: resolution.setText(str(old_resolution)) elif index > 0: resolution.setText(self.table.cellWidget(0, 3).text()) else: resolution.setText(str(axes[axes.index(str(axis.currentText()))].res)) resolution.editingFinished.connect(activechange_callback(index)) self.table.setCellWidget(index, 3, resolution) buttonwidget = QPushButton('remove') buttonwidget.clicked.connect(remove_callback(rodkey)) self.table.setCellWidget(index, 4, buttonwidget) def remove(self, rodkey): table_rodkeys = list(self.table.cellWidget(index, 0).rodkey for index in range(self.table.rowCount())) for index, label in enumerate(table_rodkeys): if rodkey == label: self.table.removeRow(index) 
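# after removing its table row, also delete the rod's group from the .fit project file (FitData.delete_rod)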
self.database.delete_rod(rodkey) print('removed: {0}'.format(rodkey)) def setlength(self, y, x=1): if x == 1: self.activeindex = y rodkey, axis, resolution = self.currentkey() self.database.save(rodkey, 'axis', axis) self.database.save(rodkey, 'resolution', resolution) self.trigger.emit() def currentkey(self): rodkey = self.table.cellWidget(self.activeindex, 0).rodkey axis = str(self.table.cellWidget(self.activeindex, 2).currentText()) resolution = float(self.table.cellWidget(self.activeindex, 3).text()) return rodkey, axis, resolution def checked(self): selection = [] for index in range(self.table.rowCount()): checkbox = self.table.cellWidget(index, 0) if checkbox.checkState(): rodkey = self.table.cellWidget(index, 0).rodkey axis = str(self.table.cellWidget(index, 2).currentText()) resolution = float(self.table.cellWidget(index, 3).text()) selection.append((rodkey, axis, resolution)) return selection class FitData(object): def __init__(self, filename): self.filename = filename self.axdict = dict() with h5py.File(self.filename, 'a') as db: for rodkey in self.rods(): spacename = db[rodkey].attrs['filename'] if not os.path.exists(spacename): warningbox = QMessageBox(2, 'Warning', 'Cannot find space {0} at file {1}; locate proper space'.format(rodkey, spacename), buttons=QMessageBox.Open) warningbox.exec_() spacename = str(QFileDialog.getOpenFileName(caption='Open space {0}'.format(rodkey), directory='.', filter='*.hdf5')) if not spacename: raise IOError('Select proper input') db[rodkey].attrs['filename'] = spacename self.axdict[rodkey] = binoculars.space.Axes.fromfile(spacename) def create_rod(self, rodkey, spacename): with h5py.File(self.filename, 'a') as db: if rodkey not in list(db.keys()): db.create_group(rodkey) db[rodkey].attrs['filename'] = spacename self.axdict[rodkey] = binoculars.space.Axes.fromfile(spacename) def delete_rod(self, rodkey): with h5py.File(self.filename, 'a') as db: del db[rodkey] def rods(self): with h5py.File(self.filename, 'a') as db: rods = list(db.keys()) return rods def copy(self, oldkey, newkey): with h5py.File(self.filename, 'a') as db: if oldkey in list(db.keys()): db.copy(db[oldkey], db, name=newkey) @property def filelist(self): filelist = [] with h5py.File(self.filename, 'a') as db: for key in db.keys(): filelist.append(db[key].attrs['filename']) return filelist def save(self, rodkey, key, value): with h5py.File(self.filename, 'a') as db: db[rodkey].attrs[str(key)] = value def load(self, rodkey, key): with h5py.File(self.filename, 'a') as db: if rodkey in db: if key in db[rodkey].attrs: return db[rodkey].attrs[str(key)] else: return None class RodData(FitData): def __init__(self, filename, rodkey, axis, resolution): super(RodData, self).__init__(filename) self.rodkey = rodkey self.slicekey = '{0}_{1}'.format(axis, resolution) self.axis = axis self.resolution = resolution with h5py.File(self.filename, 'a') as db: if rodkey in db: if self.slicekey not in db[rodkey]: db[rodkey].create_group(self.slicekey) db[rodkey][self.slicekey].create_group('attrs') def save(self, key, value): super(RodData, self).save(self.rodkey, key, value) def load(self, key): return super(RodData, self).load(self.rodkey, key) def paxes(self): axes = self.axdict[self.rodkey] projected = list(axes) axindex = axes.index(self.axis) projected.pop(axindex) return projected def get_bins(self): axes = self.axdict[self.rodkey] axindex = axes.index(self.axis) ax = axes[axindex] bins = binoculars.space.get_bins(ax, self.resolution) return bins, ax, axindex def rodlength(self): bins, ax, axindex 
= self.get_bins() return numpy.alen(bins) - 1 def get_index_value(self, index): return binoculars.space.get_axis_values(self.axdict[self.rodkey], self.axis, self.resolution)[index] def get_key(self, index): axes = self.axdict[self.rodkey] bins, ax, axindex = self.get_bins() start, stop = bins[index], bins[index + 1] k = [slice(None) for i in axes] k[axindex] = slice(start, stop) return k def space_from_index(self, index): with h5py.File(self.filename, 'a') as db: filename = db[self.rodkey].attrs['filename'] return binoculars.space.Space.fromfile(filename, self.get_key(index)).project(self.axis) def save_data(self, index, key, data): with h5py.File(self.filename, 'a') as db: id = '{0}_{1}_data'.format(int(index), key) mid = '{0}_{1}_mask'.format(int(index), key) try: db[self.rodkey][self.slicekey].create_dataset(id, data.shape, dtype=data.dtype, compression='gzip').write_direct(data) db[self.rodkey][self.slicekey].create_dataset(mid, data.shape, dtype=data.mask.dtype, compression='gzip').write_direct(data.mask) except RuntimeError: del db[self.rodkey][self.slicekey][id] del db[self.rodkey][self.slicekey][mid] db[self.rodkey][self.slicekey].create_dataset(id, data.shape, dtype=data.dtype, compression='gzip').write_direct(data) db[self.rodkey][self.slicekey].create_dataset(mid, data.shape, dtype=data.mask.dtype, compression='gzip').write_direct(data.mask) def load_data(self, index, key): with h5py.File(self.filename, 'a') as db: id = '{0}_{1}_data'.format(int(index), key) mid = '{0}_{1}_mask'.format(int(index), key) try: return numpy.ma.array(db[self.rodkey][self.slicekey][id][...], mask=db[self.rodkey][self.slicekey][mid][...]) except KeyError: return None def save_sliceattr(self, index, key, value): mkey = 'mask{0}'.format(key) with h5py.File(self.filename, 'a') as db: try: group = db[self.rodkey][self.slicekey]['attrs'] # # else it breaks with the old fitaid except KeyError: db[self.rodkey][self.slicekey].create_group('attrs') group = db[self.rodkey][self.slicekey]['attrs'] if not key in group: group.create_dataset(key, (self.rodlength(),)) group.create_dataset(mkey, (self.rodlength(),), dtype=numpy.bool).write_direct(numpy.ones(self.rodlength(), dtype=numpy.bool)) group[key][index] = value group[mkey][index] = 0 def load_sliceattr(self, index, key): mkey = 'mask{0}'.format(key) with h5py.File(self.filename, 'a') as db: try: group = db[self.rodkey][self.slicekey]['attrs'] except KeyError: db[self.rodkey][self.slicekey].create_group('attrs') group = db[self.rodkey][self.slicekey]['attrs'] if key in list(group.keys()): return numpy.ma.array(group[key][index], mask=group[mkey][index]) else: return None def all_attrkeys(self): with h5py.File(self.filename, 'a') as db: group = db[self.rodkey][self.slicekey]['attrs'] return list(group.keys()) def all_from_key(self, key): mkey = 'mask{0}'.format(key) axes = self.axdict[self.rodkey] with h5py.File(self.filename, 'a') as db: group = db[self.rodkey][self.slicekey]['attrs'] if key in list(group.keys()): return binoculars.space.get_axis_values(axes, self.axis, self.resolution), numpy.ma.array(group[key], mask=numpy.array(group[mkey])) def load_loc(self, index): loc = list() count = itertools.count() key = 'guessloc{0}'.format(next(count)) while self.load_sliceattr(index, key) != None: loc.append(self.load_sliceattr(index, key)) key = 'guessloc{0}'.format(next(count)) if len(loc) > 0: return loc else: count = itertools.count() key = 'loc{0}'.format(next(count)) while self.load_sliceattr(index, key) != None: loc.append(self.load_sliceattr(index, key)) 
key = 'loc{0}'.format(next(count)) if len(loc) > 0: return loc else: return None def save_loc(self, index, loc): for i, value in enumerate(loc): self.save_sliceattr(index, 'guessloc{0}'.format(i), value) def save_segments(self, segments): with h5py.File(self.filename, 'a') as db: try: db[self.rodkey][self.slicekey].create_dataset('segment', segments.shape, dtype=segments.dtype, compression='gzip').write_direct(segments) except RuntimeError: del db[self.rodkey][self.slicekey]['segment'] db[self.rodkey][self.slicekey].create_dataset('segment', segments.shape, dtype=segments.dtype, compression='gzip').write_direct(segments) def load_segments(self): with h5py.File(self.filename, 'a') as db: try: return numpy.array(db[self.rodkey][self.slicekey]['segment'][:]) except KeyError: return None def __iter__(self): for index in range(self.rodlength()): yield self.space_from_index(index) def short_filename(filename): return filename.split('/')[-1].split('.')[0] class HiddenToolbar(NavigationToolbar2QT): def __init__(self, corner_callback, canvas): super(HiddenToolbar, self).__init__(canvas, None) self._corner_callback = corner_callback self.zoom() def _generate_key(self): limits = [] for a in self.canvas.figure.get_axes(): limits.append([a.get_xlim(), a.get_ylim()]) return limits def press(self, event): self._corner_preclick = self._generate_key() def release(self, event): if self._corner_preclick == self._generate_key(): self._corner_callback(event.xdata, event.ydata) self._corner_preclick = None class FitWidget(QWidget): def __init__(self, database ,parent=None): super(FitWidget, self).__init__(parent) self.database = database vbox = QHBoxLayout() self.figure = matplotlib.figure.Figure() self.canvas = FigureCanvasQTAgg(self.figure) self.toolbar = HiddenToolbar(self.loc_callback, self.canvas) vbox.addWidget(self.canvas) self.setLayout(vbox) def loc_callback(self, x, y): if self.ax: self.database.save_loc(self.currentindex(), numpy.array([x, y])) def plot(self, index = None): if index == None: index = self.currentindex() space = self.database.space_from_index(index) fitdata = self.database.load_data(index, 'fit') self.figure.clear() self.figure.space_axes = space.axes info = self.database.get_index_value(index) label = self.database.axis if fitdata is not None: if space.dimension == 1: self.ax = self.figure.add_subplot(111) binoculars.plot.plot(space, self.figure, self.ax, fit = fitdata) elif space.dimension == 2: self.ax = self.figure.add_subplot(121) binoculars.plot.plot(space, self.figure, self.ax, fit = None) self.ax = self.figure.add_subplot(122) binoculars.plot.plot(space, self.figure, self.ax, fit = fitdata) else: self.ax = self.figure.add_subplot(111) binoculars.plot.plot(space, self.figure, self.ax) self.figure.suptitle('{0}, res = {1}, {2} = {3}'.format(self.database.rodkey, self.database.resolution, label, info)) self.canvas.draw() def fit(self, index, space, function): print(index) if not len(space.get_masked().compressed()) == 0: loc = self.get_loc() fit = function(space, loc = loc) fit.fitdata.mask = space.get_masked().mask self.database.save_data(index, 'fit', fit.fitdata) params = list(line.split(':')[0] for line in fit.summary.split('\n')) print(fit.result, fit.variance) for key, value in zip(params, fit.result): self.database.save_sliceattr(index, key, value) for key, value in zip(params, fit.variance): self.database.save_sliceattr(index, 'var_{0}'.format(key), value) def get_loc(self): return self.database.load_loc(self.currentindex()) def currentindex(self): index = 
self.database.load('index') if index == None: return 0 else: return index class IntegrateWidget(QWidget): def __init__(self, database, parent = None): super(IntegrateWidget, self).__init__(parent) self.parent = parent self.database = database self.figure = matplotlib.figure.Figure() self.canvas = FigureCanvasQTAgg(self.figure) self.toolbar = HiddenToolbar(self.loc_callback, self.canvas) hbox = QHBoxLayout() splitter = QSplitter(Qt.Vertical) self.make_controlwidget() splitter.addWidget(self.canvas) splitter.addWidget(self.control_widget) hbox.addWidget(splitter) self.setLayout(hbox) def make_controlwidget(self): self.control_widget = QWidget() integratebox = QVBoxLayout() intensitybox = QHBoxLayout() backgroundbox = QHBoxLayout() self.aroundroi = QCheckBox('background around roi') self.aroundroi.setChecked(1) self.aroundroi.clicked.connect(self.refresh_aroundroi) self.hsize = QDoubleSpinBox() self.vsize = QDoubleSpinBox() intensitybox.addWidget(QLabel('roi size:')) intensitybox.addWidget(self.hsize) intensitybox.addWidget(self.vsize) self.left = QDoubleSpinBox() self.right = QDoubleSpinBox() self.top = QDoubleSpinBox() self.bottom = QDoubleSpinBox() self.hsize.valueChanged.connect(self.send) self.vsize.valueChanged.connect(self.send) self.left.valueChanged.connect(self.send) self.right.valueChanged.connect(self.send) self.top.valueChanged.connect(self.send) self.bottom.valueChanged.connect(self.send) backgroundbox.addWidget(self.aroundroi) backgroundbox.addWidget(self.left) backgroundbox.addWidget(self.right) backgroundbox.addWidget(self.top) backgroundbox.addWidget(self.bottom) integratebox.addLayout(intensitybox) integratebox.addLayout(backgroundbox) self.fromfit = QRadioButton('peak from fit', self) self.fromfit.setChecked(True) self.fromfit.toggled.connect(self.plot_box) self.fromfit.toggled.connect(self.refresh_tracker) self.fromsegment = QRadioButton('peak from segment', self) self.fromsegment.setChecked(False) self.fromsegment.toggled.connect(self.plot_box) self.fromsegment.toggled.connect(self.refresh_tracker) self.trackergroup = QButtonGroup(self) self.trackergroup.addButton(self.fromfit) self.trackergroup.addButton(self.fromsegment) radiobox = QHBoxLayout() radiobox.addWidget(self.fromfit) radiobox.addWidget(self.fromsegment) integratebox.addLayout(radiobox) self.control_widget.setLayout(integratebox) def refresh_aroundroi(self): self.database.save('aroundroi', self.aroundroi.checkState()) axes = self.database.paxes() if not self.aroundroi.checkState(): self.left.setMinimum(axes[0].min) self.left.setMaximum(axes[0].max) self.right.setMinimum(axes[0].min) self.right.setMaximum(axes[0].max) self.top.setMinimum(axes[1].min) self.top.setMaximum(axes[1].max) self.bottom.setMinimum(axes[1].min) self.bottom.setMaximum(axes[1].max) else: self.left.setMinimum(0) self.left.setMaximum(axes[0].max - axes[0].min) self.right.setMinimum(0) self.right.setMaximum(axes[0].max - axes[0].min) self.top.setMinimum(0) self.top.setMaximum(axes[1].max - axes[1].min) self.bottom.setMinimum(0) self.bottom.setMaximum(axes[1].max - axes[1].min) def refresh_tracker(self): self.database.save('fromfit', self.fromfit.isChecked()) self.plot_box() def set_axis(self): roi = self.database.load('roi') aroundroi = self.database.load('aroundroi') if aroundroi != None: self.aroundroi.setChecked(aroundroi) else: self.aroundroi.setChecked(True) self.refresh_aroundroi() axes = self.database.paxes() self.hsize.setSingleStep(axes[1].res) self.hsize.setDecimals(len(str(axes[1].res)) - 2) self.vsize.setSingleStep(axes[0].res) 
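# step size and decimal precision of the roi/background spin boxes follow the resolution of the projected axes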
self.vsize.setDecimals(len(str(axes[0].res)) - 2) self.left.setSingleStep(axes[1].res) self.left.setDecimals(len(str(axes[1].res)) - 2) self.right.setSingleStep(axes[1].res) self.right.setDecimals(len(str(axes[1].res)) - 2) self.top.setSingleStep(axes[0].res) self.top.setDecimals(len(str(axes[0].res)) - 2) self.bottom.setSingleStep(axes[0].res) self.bottom.setDecimals(len(str(axes[0].res)) - 2) tracker = self.database.load('fromfit') if tracker != None: if tracker: self.fromfit.setChecked(True) else: self.fromsegment.setChecked(True) if roi is not None: for box, value in zip([self.hsize, self.vsize, self.left, self.right, self.top, self.bottom], roi): box.setValue(value) def send(self): roi = [self.hsize.value(), self.vsize.value(), self.left.value() ,self.right.value() ,self.top.value(), self.bottom.value()] self.database.save('roi', roi) self.plot_box() def integrate(self, index, space): loc = self.get_loc() if loc != None: axes = space.axes key = space.get_key(self.intkey(loc, axes)) fitdata = self.database.load_data(index, 'fit') if fitdata is not None: fitintensity = fitdata[key].data.flatten() fitbkg = numpy.hstack([fitdata[space.get_key(bkgkey)].data.flatten() for bkgkey in self.bkgkeys(loc, axes)]) if numpy.alen(fitbkg) == 0: fitstructurefactor = fitintensity.sum() elif numpy.alen(fitintensity) == 0: fitstructurefactor = numpy.nan else: fitstructurefactor = numpy.sqrt(fitintensity.sum() - numpy.alen(fitintensity) * 1.0 / numpy.alen(fitbkg) * fitbkg.sum()) self.database.save_sliceattr(index, 'fitsf', fitstructurefactor) niintensity = space[self.intkey(loc, axes)].get_masked().compressed() try: intensity = interpolate(space[self.intkey(loc, axes)]).flatten() bkg = numpy.hstack([space[bkgkey].get_masked().compressed() for bkgkey in self.bkgkeys(loc, axes)]) interdata = space.get_masked() interdata[key] = intensity.reshape(interdata[key].shape) interdata[key].mask = numpy.zeros_like(interdata[key]) self.database.save_data(index, 'inter', interdata) except ValueError as e: print('Warning error interpolating silce {0}: {1}'.format(index, e)) intensity = numpy.array([]) bkg = numpy.array([]) if numpy.alen(intensity) == 0: structurefactor = numpy.nan nistructurefactor = numpy.nan elif numpy.alen(bkg) == 0: structurefactor = numpy.sqrt(intensity.sum()) nistructurefactor = numpy.sqrt(niintensity.sum()) else: structurefactor = numpy.sqrt(intensity.sum() - numpy.alen(intensity) * 1.0 / numpy.alen(bkg) * bkg.sum()) nistructurefactor = numpy.sqrt(niintensity.sum() - numpy.alen(niintensity) * 1.0 / numpy.alen(bkg) * bkg.sum()) self.database.save_sliceattr(index, 'sf', structurefactor) self.database.save_sliceattr(index, 'nisf', nistructurefactor) print('Structurefactor {0}: {1}'.format(index, structurefactor)) def intkey(self, coords, axes): vsize = self.vsize.value() / 2 hsize = self.hsize.value() / 2 return tuple(ax.restrict(slice(coord - size, coord + size)) for ax, coord, size in zip(axes, coords, [vsize, hsize])) def bkgkeys(self, coords, axes): aroundroi = self.database.load('aroundroi') if aroundroi: key = self.intkey(coords, axes) vsize = self.vsize.value() / 2 hsize = self.hsize.value() / 2 leftkey = (key[0], axes[1].restrict(slice(coords[1] - hsize - self.left.value(), coords[1] - hsize))) rightkey = (key[0], axes[1].restrict(slice(coords[1] + hsize, coords[1] + hsize + self.right.value()))) topkey = (axes[0].restrict(slice(coords[0] - vsize - self.top.value(), coords[0] - vsize)), key[1]) bottomkey = (axes[0].restrict(slice(coords[0] + vsize, coords[0] + vsize + self.bottom.value())), 
key[1]) return leftkey, rightkey, topkey, bottomkey else: return [(axes[0].restrict(slice(self.left.value(), self.right.value())), axes[1].restrict(slice(self.top.value(), self.bottom.value())))] def get_loc(self): if self.fromfit.isChecked(): return self.database.load_loc(self.currentindex()) else: index = self.currentindex() indexvalue = self.database.get_index_value(index) return self.parent.peakwidget.get_coords(indexvalue) def loc_callback(self, x, y): if self.ax: if self.fromfit.isChecked(): self.database.save_loc(self.currentindex(), numpy.array([x, y])) else: index = self.currentindex() indexvalue = self.database.get_index_value(index) self.parent.peakwidget.add_row(numpy.array([indexvalue, x, y])) self.plot_box() def plot(self, index = None): if index == None: index = self.currentindex() space = self.database.space_from_index(index) interdata = self.database.load_data(index, 'inter') info = self.database.get_index_value(index) label = self.database.axis self.figure.clear() self.figure.space_axes = space.axes if interdata is not None: if space.dimension == 1: self.ax = self.figure.add_subplot(111) binoculars.plot.plot(space, self.figure, self.ax, fit = interdata) elif space.dimension == 2: self.ax = self.figure.add_subplot(121) binoculars.plot.plot(space, self.figure, self.ax, fit = None) self.ax = self.figure.add_subplot(122) binoculars.plot.plot(space, self.figure, self.ax, fit = interdata) else: self.ax = self.figure.add_subplot(111) binoculars.plot.plot(space, self.figure, self.ax) self.figure.suptitle('{0}, res = {1}, {2} = {3}'.format(self.database.rodkey, self.database.resolution, label, info)) self.plot_box() self.canvas.draw() def plot_box(self): loc = self.get_loc() if len(self.figure.get_axes()) != 0 and loc != None: ax = self.figure.get_axes()[0] axes = self.figure.space_axes key = self.intkey(loc, axes) bkgkey = self.bkgkeys(loc, axes) ax.patches = [] rect = Rectangle((key[0].start, key[1].start), key[0].stop - key[0].start, key[1].stop - key[1].start, alpha = 0.2,color = 'k') ax.add_patch(rect) for k in bkgkey: bkg = Rectangle((k[0].start, k[1].start), k[0].stop - k[0].start, k[1].stop - k[1].start, alpha = 0.2,color = 'r') ax.add_patch(bkg) self.canvas.draw() def currentindex(self): index = self.database.load('index') if index == None: return 0 else: return index class ButtonedSlider(QWidget): slice_index = pyqtSignal(int) def __init__(self,parent=None): super(ButtonedSlider, self).__init__(parent) self.navigation_button_left_end = QPushButton('|<') self.navigation_button_left_one = QPushButton('<') self.navigation_slider = QSlider(Qt.Horizontal) self.navigation_slider.sliderReleased.connect(self.send) self.navigation_button_right_one = QPushButton('>') self.navigation_button_right_end = QPushButton('>|') self.navigation_button_left_end.setMaximumWidth(20) self.navigation_button_left_one.setMaximumWidth(20) self.navigation_button_right_end.setMaximumWidth(20) self.navigation_button_right_one.setMaximumWidth(20) self.navigation_button_left_end.clicked.connect(self.slider_change_left_end) self.navigation_button_left_one.clicked.connect(self.slider_change_left_one) self.navigation_button_right_end.clicked.connect(self.slider_change_right_end) self.navigation_button_right_one.clicked.connect(self.slider_change_right_one) box = QHBoxLayout() box.addWidget(self.navigation_button_left_end) box.addWidget(self.navigation_button_left_one) box.addWidget(self.navigation_slider) box.addWidget(self.navigation_button_right_one) box.addWidget(self.navigation_button_right_end) 
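# the navigation widget starts disabled; set_length() enables it once a rod has been selected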
self.setDisabled(True) self.setLayout(box) def set_length(self,length): self.navigation_slider.setMinimum(0) self.navigation_slider.setMaximum(length - 1) self.navigation_slider.setTickPosition(QSlider.TicksBelow) self.navigation_slider.setValue(0) self.setEnabled(True) def send(self): self.slice_index.emit(self.navigation_slider.value()) def slider_change_left_one(self): self.navigation_slider.setValue(max(self.navigation_slider.value() - 1, 0)) self.send() def slider_change_left_end(self): self.navigation_slider.setValue(0) self.send() def slider_change_right_one(self): self.navigation_slider.setValue(min(self.navigation_slider.value() + 1, self.navigation_slider.maximum())) self.send() def slider_change_right_end(self): self.navigation_slider.setValue(self.navigation_slider.maximum()) self.send() def index(self): return self.navigation_slider.value() def set_index(self, index): self.navigation_slider.setValue(index) class HiddenToolbar2(NavigationToolbar2QT): def __init__(self, canvas): super(HiddenToolbar2, self).__init__(canvas, None) self.zoom() class OverviewWidget(QWidget): def __init__(self, database, parent = None): super(OverviewWidget, self).__init__(parent) self.databaselist = list() self.figure = matplotlib.figure.Figure() self.canvas = FigureCanvasQTAgg(self.figure) self.toolbar = HiddenToolbar2(self.canvas) self.table = QTableWidget(0,2) self.make_table() self.table.cellClicked.connect(self.plot) hbox = QHBoxLayout() splitter = QSplitter(Qt.Horizontal) splitter.addWidget(self.canvas) splitter.addWidget(self.control_widget) hbox.addWidget(splitter) self.setLayout(hbox) def select(self): selection = [] for index in range(self.table.rowCount()): checkbox = self.table.cellWidget(index, 0) if checkbox.checkState(): selection.append(str(self.table.cellWidget(index,1).text())) return selection def make_table(self): self.control_widget = QWidget() vbox = QVBoxLayout() minibox = QHBoxLayout() vbox.addWidget(self.table) self.table.setHorizontalHeaderLabels(['','param']) for index, width in enumerate([25,50]): self.table.setColumnWidth(index, width) self.log = QCheckBox('log') self.log.clicked.connect(self.plot) self.export_button = QPushButton('export curves') self.export_button.clicked.connect(self.export) minibox.addWidget(self.log) minibox.addWidget(self.export_button) vbox.addLayout(minibox) self.control_widget.setLayout(vbox) def export(self): folder = str(QFileDialog.getExistingDirectory(self, "Select directory to save curves")) params = self.select() for param in params: for database in self.databaselist: x, y = database.all_from_key(param) args = numpy.argsort(x) numpy.savetxt( os.path.join(folder,'{0}_{1}.txt'.format(param, database.rodkey)), numpy.vstack(arr[args] for arr in [x, y]).T) def refresh(self, databaselist): self.databaselist = databaselist params = self.select() while self.table.rowCount() > 0: self.table.removeRow(0) allparams = [[param for param in database.all_attrkeys() if not param.startswith('mask')] for database in databaselist] allparams.extend([['locx_s', 'locy_s']] for database in databaselist if database.load_segments() is not None) if len(allparams) > 0: uniqueparams = numpy.unique(numpy.hstack([params for params in allparams])) else: uniqueparams = [] for param in uniqueparams: index = self.table.rowCount() self.table.insertRow(index) checkboxwidget = QCheckBox() if param in params: checkboxwidget.setChecked(1) else: checkboxwidget.setChecked(0) self.table.setCellWidget(index,0, checkboxwidget) checkboxwidget.clicked.connect(self.plot) item = 
QLabel(param) self.table.setCellWidget(index, 1, item) self.plot() def plot(self): params = self.select() self.figure.clear() self.ax = self.figure.add_subplot(111) for param in params: for database in self.databaselist: if param == 'locx_s': segments = database.load_segments() if segments is not None: x = numpy.hstack([database.get_index_value(index) for index in range(database.rodlength())]) y = numpy.vstack([get_coords(xvalue, segments) for xvalue in x]) self.ax.plot(x, y[:,0], '+', label = '{0} - {1}'.format('locx_s', database.rodkey)) elif param == 'locy_s': segments = database.load_segments() if segments is not None: x = numpy.hstack([database.get_index_value(index) for index in range(database.rodlength())]) y = numpy.vstack([get_coords(xvalue, segments) for xvalue in x]) self.ax.plot(x, y[:,1], '+', label = '{0} - {1}'.format('locy_s', database.rodkey)) else: x, y = database.all_from_key(param) self.ax.plot(x, y, '+', label = '{0} - {1}'.format(param, database.rodkey)) self.ax.legend() if self.log.checkState(): self.ax.semilogy() self.canvas.draw() class PeakWidget(QWidget): def __init__(self, database, parent=None): super(PeakWidget, self).__init__(parent) self.database = database # create a QTableWidget self.table = QTableWidget(0, 3, self) self.table.horizontalHeader().setStretchLastSection(True) self.table.verticalHeader().setVisible(False) self.table.itemChanged.connect(self.save) self.btn_add_row = QPushButton('+', self) self.btn_add_row.clicked.connect(self.add_row) self.buttonRemove = QPushButton('-', self) self.buttonRemove.clicked.connect(self.remove) vbox = QVBoxLayout() hbox = QHBoxLayout() hbox.addWidget(self.btn_add_row) hbox.addWidget(self.buttonRemove) vbox.addLayout(hbox) vbox.addWidget(self.table) self.setLayout(vbox) def set_axis(self): self.axes = self.database.paxes() while self.table.rowCount() > 0: self.table.removeRow(0) segments = self.database.load_segments() if segments is not None: for index in range(segments.shape[0]): self.add_row(segments[index, :]) self.table.setHorizontalHeaderLabels(['{0}'.format(self.database.axis), '{0}'.format(self.axes[0].label), '{0}'.format(self.axes[1].label)]) def add_row(self, row = None): rowindex = self.table.rowCount() self.table.insertRow(rowindex) if row is not None: for index in range(3): newitem = QTableWidgetItem(str(row[index])) self.table.setItem(rowindex, index, newitem) def remove(self): self.table.removeRow(self.table.currentRow()) self.save() def axis_coords(self): a = numpy.zeros((self.table.rowCount(), self.table.columnCount())) for rowindex in range(a.shape[0]): for columnindex in range(a.shape[1]): item = self.table.item(rowindex, columnindex) if item is not None: a[rowindex, columnindex] = float(item.text()) return a def save(self): self.database.save_segments(self.axis_coords()) def get_coords(self, x): return get_coords(x, self.axis_coords()) def get_coords(x, coords): if coords.shape[0] == 0: return None if coords.shape[0] == 1: return coords[0,1:] args = numpy.argsort(coords[:,0]) x0 = coords[args,0] x1 = coords[args,1] x2 = coords[args,2] if x < x0.min(): first = 0 last = 1 elif x > x0.max(): first = -2 last = -1 else: first = numpy.searchsorted(x0, x) - 1 last = numpy.searchsorted(x0, x) a1 = (x1[last] - x1[first]) / (x0[last] - x0[first]) b1 = x1[first] - a1 * x0[first] a2 = (x2[last] - x2[first]) / (x0[last] - x0[first]) b2 = x2[first] - a2 * x0[first] return numpy.array([a1 * x + b1, a2 * x + b2]) def interpolate(space): data = space.get_masked() mask = data.mask grid = 
numpy.vstack([numpy.ma.array(g, mask=mask).compressed() for g in space.get_grid()]).T open = numpy.vstack([numpy.ma.array(g, mask=numpy.invert(mask)).compressed() for g in space.get_grid()]).T if open.shape[0] == 0: return data.compressed() elif grid.shape[0] == 0: return data.compressed() else: interpolated = griddata(grid, data.compressed(), open) values = data.data.copy() values[mask] = interpolated mask = numpy.isnan(values) if mask.sum() > 0: data = numpy.ma.array(values, mask = mask) grid = numpy.vstack([numpy.ma.array(g, mask=mask).compressed() for g in space.get_grid()]).T open = numpy.vstack([numpy.ma.array(g, mask=numpy.invert(mask)).compressed() for g in space.get_grid()]).T interpolated = griddata(grid, data.compressed(), open, method = 'nearest') values[mask] = interpolated return values def find_unused_rodkey(rodkey, rods): if not rodkey in rods: return rodkey for index in itertools.count(0): newkey = '{0}_{1}'.format(rodkey, index) if newkey not in rods: return newkey if __name__ == "__main__": app = QApplication(sys.argv) main = Window() main.resize(1000, 600) main.show() sys.exit(app.exec_()) binoculars-0.0.4/scripts/binoculars-gui000077500000000000000000001355231343276063200202330ustar00rootroot00000000000000#!/usr/bin/env python from __future__ import unicode_literals import sys import os import json import signal import subprocess import socket import threading import numpy import matplotlib.figure import matplotlib.image from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT from matplotlib.pyplot import rcParams rcParams['image.cmap'] = 'jet' # from mpl_toolkits.mplot3d import Axes3D from PyQt5.Qt import (Qt) # noqa from PyQt5.QtCore import (QThread, pyqtSignal) from PyQt5.QtGui import (QPainter) from PyQt5.QtWidgets import (QAction, QApplication, QStyle, QSlider, QMenuBar, QTabWidget, QFileDialog, QStatusBar, QMessageBox, QRadioButton, QButtonGroup, QCheckBox, QPushButton, QHBoxLayout, QVBoxLayout, QSplitter, QTableWidgetItem, QTableWidget, QLabel, QLineEdit, QStyleOptionSlider, QMainWindow, QWidget) #python3 support PY3 = sys.version_info > (3,) if PY3: import socketserver import queue else: import SocketServer as socketserver import Queue as queue def set_src(): import os.path as osp dirpath = osp.join(osp.dirname(osp.abspath(__file__)), osp.pardir) sys.path.insert(0, osp.abspath(dirpath)) try: import binoculars.main import binoculars.space import binoculars.plot import binoculars.util except ImportError: # try to use code from src distribution set_src() import binoculars.main import binoculars.space import binoculars.plot import binoculars.util #RangeSlider is taken from https://www.mail-archive.com/pyqt@riverbankcomputing.com/msg22889.html class RangeSlider(QSlider): """ A slider for ranges. This class provides a dual-slider for ranges, where there is a defined maximum and minimum, as is a normal slider, but instead of having a single slider value, there are 2 slider values. 
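    A minimal usage sketch (illustrative only, not taken from the original code;
    it assumes a running Qt application and an existing layout to place the
    widget in):

    >>> slider = RangeSlider(Qt.Horizontal)
    >>> slider.setMinimum(0)
    >>> slider.setMaximum(100)
    >>> slider.setLow(20)    # position of the lower handle
    >>> slider.setHigh(80)   # position of the upper handle
    >>> slider.sliderMoved.connect(lambda pos: print(slider.low(), slider.high()))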
This class emits the same signals as the QSlider base class, with the exception of valueChanged """ def __init__(self, *args): super(RangeSlider, self).__init__(*args) self._low = self.minimum() self._high = self.maximum() self.pressed_control = QStyle.SC_None self.hover_control = QStyle.SC_None self.click_offset = 0 # 0 for the low, 1 for the high, -1 for both self.active_slider = 0 def low(self): return self._low def setLow(self, low): self._low = low self.update() def high(self): return self._high def setHigh(self, high): self._high = high self.update() def paintEvent(self, event): # based on http://qt.gitorious.org/qt/qt/blobs/master/src/gui/widgets/qslider.cpp painter = QPainter(self) style = QApplication.style() for i, value in enumerate([self._low, self._high]): opt = QStyleOptionSlider() self.initStyleOption(opt) # Only draw the groove for the first slider so it doesn't get drawn # on top of the existing ones every time if i == 0: opt.subControls = QStyle.SC_SliderHandle # QStyle.SC_SliderGroove | QStyle.SC_SliderHandle else: opt.subControls = QStyle.SC_SliderHandle if self.tickPosition() != self.NoTicks: opt.subControls |= QStyle.SC_SliderTickmarks if self.pressed_control: opt.activeSubControls = self.pressed_control opt.state |= QStyle.State_Sunken else: opt.activeSubControls = self.hover_control opt.sliderPosition = value opt.sliderValue = value style.drawComplexControl(QStyle.CC_Slider, opt, painter, self) def mousePressEvent(self, event): event.accept() style = QApplication.style() button = event.button() # In a normal slider control, when the user clicks on a point in the # slider's total range, but not on the slider part of the control the # control would jump the slider value to where the user clicked. # For this control, clicks which are not direct hits will slide both # slider parts if button: opt = QStyleOptionSlider() self.initStyleOption(opt) self.active_slider = -1 for i, value in enumerate([self._low, self._high]): opt.sliderPosition = value hit = style.hitTestComplexControl(style.CC_Slider, opt, event.pos(), self) if hit == style.SC_SliderHandle: self.active_slider = i self.pressed_control = hit self.triggerAction(self.SliderMove) self.setRepeatAction(self.SliderNoAction) self.setSliderDown(True) break if self.active_slider < 0: self.pressed_control = QStyle.SC_SliderHandle self.click_offset = self.__pixelPosToRangeValue(self.__pick(event.pos())) self.triggerAction(self.SliderMove) self.setRepeatAction(self.SliderNoAction) else: event.ignore() def mouseReleaseEvent(self, _event): self.sliderReleased.emit() def mouseMoveEvent(self, event): if self.pressed_control != QStyle.SC_SliderHandle: event.ignore() return event.accept() new_pos = self.__pixelPosToRangeValue(self.__pick(event.pos())) opt = QStyleOptionSlider() self.initStyleOption(opt) if self.active_slider < 0: offset = new_pos - self.click_offset self._high += offset self._low += offset if self._low < self.minimum(): diff = self.minimum() - self._low self._low += diff self._high += diff if self._high > self.maximum(): diff = self.maximum() - self._high self._low += diff self._high += diff elif self.active_slider == 0: if new_pos >= self._high: new_pos = self._high - 1 self._low = new_pos else: if new_pos <= self._low: new_pos = self._low + 1 self._high = new_pos self.click_offset = new_pos self.update() self.sliderMoved.emit(new_pos) def __pick(self, pt): if self.orientation() == Qt.Horizontal: return pt.x() else: return pt.y() def __pixelPosToRangeValue(self, pos): opt = QStyleOptionSlider() 
self.initStyleOption(opt) style = QApplication.style() gr = style.subControlRect(style.CC_Slider, opt, style.SC_SliderGroove, self) sr = style.subControlRect(style.CC_Slider, opt, style.SC_SliderHandle, self) if self.orientation() == Qt.Horizontal: slider_length = sr.width() slider_min = gr.x() slider_max = gr.right() - slider_length + 1 else: slider_length = sr.height() slider_min = gr.y() slider_max = gr.bottom() - slider_length + 1 return style.sliderValueFromPosition(self.minimum(), self.maximum(), pos-slider_min, slider_max-slider_min, opt.upsideDown) class Window(QMainWindow): def __init__(self, parent=None): super(Window, self).__init__(parent) newproject = QAction("New project", self) newproject.triggered.connect(self.newproject) loadproject = QAction("Open project", self) loadproject.triggered.connect(self.loadproject) saveproject = QAction("Save project", self) saveproject.triggered.connect(self.saveproject) addspace = QAction("Import space", self) addspace.triggered.connect(self.add_to_project) savespace = QAction("Export space", self) savespace.triggered.connect(self.exportspace) menu_bar = QMenuBar() f = menu_bar.addMenu("&File") f.addAction(newproject) f.addAction(loadproject) f.addAction(saveproject) f.addAction(addspace) f.addAction(savespace) merge = QAction("Merge", self) merge.triggered.connect(self.merge) subtract = QAction("Subtract", self) subtract.triggered.connect(self.subtract) edit = menu_bar.addMenu("&Edit") edit.addAction(merge) edit.addAction(subtract) start_server = QAction("Start server queue", self) start_server.triggered.connect(lambda: self.open_server(startq=True)) stop_server = QAction("Stop server queue", self) stop_server.triggered.connect(self.kill_server) recieve = QAction("Open for spaces", self) recieve.triggered.connect(lambda: self.open_server(startq=False)) serve = menu_bar.addMenu("&Serve") serve.addAction(start_server) serve.addAction(stop_server) serve.addAction(recieve) self.tab_widget = QTabWidget(self) self.tab_widget.setTabsClosable(True) self.tab_widget.tabCloseRequested.connect(self.tab_widget.removeTab) self.statusbar = QStatusBar() self.setCentralWidget(self.tab_widget) self.setMenuBar(menu_bar) self.setStatusBar(self.statusbar) self.threads = [] self.pro = None def closeEvent(self, event): self.kill_subprocess() super(Window, self).closeEvent(event) def newproject(self): widget = ProjectWidget([], parent=self) self.tab_widget.addTab(widget, 'New Project') self.tab_widget.setCurrentWidget(widget) def loadproject(self, filename=None): if not filename: dialog = QFileDialog(self, "Load project") dialog.setNameFilters(['binoculars project file (*.proj)']) dialog.setFileMode(QFileDialog.ExistingFiles) dialog.setAcceptMode(QFileDialog.AcceptOpen) if not dialog.exec_(): return fname = dialog.selectedFiles() if not fname: return for name in fname: try: widget = ProjectWidget.fromfile(str(name), parent=self) self.tab_widget.addTab(widget, short_filename(str(name))) self.tab_widget.setCurrentWidget(widget) except Exception as e: QMessageBox.critical(self, 'Load project', 'Unable to load project from {}: {}'.format(fname, e)) else: widget = ProjectWidget.fromfile(filename, parent=self) self.tab_widget.addTab(widget, short_filename(filename)) def saveproject(self): widget = self.tab_widget.currentWidget() dialog = QFileDialog(self, "Save project") dialog.setNameFilters(['binoculars project file (*.proj)']) dialog.setDefaultSuffix('proj') dialog.setFileMode(QFileDialog.AnyFile) dialog.setAcceptMode(QFileDialog.AcceptSave) if not dialog.exec_(): 
return fname = dialog.selectedFiles()[0] if not fname: return try: index = self.tab_widget.currentIndex() self.tab_widget.setTabText(index, short_filename(fname)) widget.tofile(fname) except Exception as e: QMessageBox.critical(self, 'Save project', 'Unable to save project to {}: {}'.format(fname, e)) def add_to_project(self): if self.tab_widget.count() == 0: self.newproject() dialog = QFileDialog(self, "Import spaces") dialog.setNameFilters(['binoculars space file (*.hdf5)']) dialog.setFileMode(QFileDialog.ExistingFiles) dialog.setAcceptMode(QFileDialog.AcceptOpen) if not dialog.exec_(): return fname = dialog.selectedFiles() if not fname: return for name in fname: try: widget = self.tab_widget.currentWidget() widget.addspace(str(name), True) except Exception as _e: raise #QMessageBox.critical(self, 'Import spaces', 'Unable to import space {}: {}'.format(str(name), e)) def exportspace(self): widget = self.tab_widget.currentWidget() dialog = QFileDialog(self, "save mesh") dialog.setFileMode(QFileDialog.AnyFile) dialog.setAcceptMode(QFileDialog.AcceptSave) if not dialog.exec_(): return fname = dialog.selectedFiles()[0] if not fname: return try: _index = self.tab_widget.currentIndex() widget.space_to_file(str(fname)) except Exception as e: QMessageBox.critical(self, 'export fitdata', 'Unable to save mesh to {}: {}'.format(fname, e)) def merge(self): widget = self.tab_widget.currentWidget() dialog = QFileDialog(self, "save mesh") dialog.setNameFilters(['binoculars space file (*.hdf5)']) dialog.setDefaultSuffix('hdf5') dialog.setFileMode(QFileDialog.AnyFile) dialog.setAcceptMode(QFileDialog.AcceptSave) if not dialog.exec_(): return fname = dialog.selectedFiles()[0] if not fname: return try: _index = self.tab_widget.currentIndex() widget.merge(str(fname)) except Exception as e: QMessageBox.critical(self, 'merge', 'Unable to save mesh to {}: {}'.format(fname, e)) def subtract(self): dialog = QFileDialog(self, "subtract space") dialog.setNameFilters(['binoculars space file (*.hdf5)']) dialog.setFileMode(QFileDialog.ExistingFiles) dialog.setAcceptMode(QFileDialog.AcceptOpen) if not dialog.exec_(): return fname = dialog.selectedFiles() if not fname: return for name in fname: try: widget = self.tab_widget.currentWidget() widget.subtractspace(str(name)) except Exception as e: QMessageBox.critical(self, 'Import spaces', 'Unable to import space {}: {}'.format(fname, e)) def open_server(self, startq=True): if len(self.threads) != 0: print('Server already running') else: HOST, PORT = socket.gethostbyname(socket.gethostname()), 0 self.q = queue.Queue() server = ThreadedTCPServer((HOST, PORT), SpaceTCPHandler) server.q = self.q self.ip, self.port = server.server_address if startq: cmd = ['python', os.path.join(os.path.dirname(__file__), 'binoculars-server.py'), str(self.ip), str(self.port)] self.pro = subprocess.Popen(cmd, stdin=None, stdout=None, stderr=None, preexec_fn=os.setsid) server_thread = threading.Thread(target=server.serve_forever) server_thread.daemon = True server_thread.start() updater = UpdateThread() updater.data_found.connect(self.update) updater.q = self.q self.threads.append(updater) updater.start() if not startq: print(('GUI server started running at ip {0} and port {1}.'.format(self.ip, self.port))) def kill_server(self): if len(self.threads) == 0: print('No server running.') else: self.threads = [] self.kill_subprocess() self.pro = None def kill_subprocess(self): if not self.pro == None: os.killpg(self.pro.pid, signal.SIGTERM) def update(self): names = [] for tab in 
range(self.tab_widget.count()): names.append(self.tab_widget.tabText(tab)) if 'server' not in names: widget = ProjectWidget([], parent=self) self.tab_widget.addTab(widget, 'server') names.append('server') index = names.index('server') serverwidget = self.tab_widget.widget(index) while not self.threads[0].fq.empty(): command, space = self.threads[0].fq.get() serverwidget.table.addfromserver(command, space) serverwidget.table.select() if serverwidget.auto_update.isChecked(): serverwidget.limitwidget.refresh() class UpdateThread(QThread): fq = queue.Queue() data_found = pyqtSignal(object) def run(self): delay = binoculars.util.loop_delayer(1) jobs = [] labels = [] while 1: if not self.q.empty(): command, space = self.q.get() if command in labels: jobs[labels.index(command)].append(space) else: jobs.append([space]) labels.append(command) elif self.q.empty() and len(jobs) > 0: self.fq.put((labels.pop(), binoculars.space.sum(jobs.pop()))) self.data_found.emit('data found') else: next(delay) class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer): pass class SpaceTCPHandler(socketserver.BaseRequestHandler): def handle(self): command, config, metadata, axes, photons, contributions = binoculars.util.socket_recieve(self) space = binoculars.space.Space(binoculars.space.Axes.fromarray(axes)) space.config = binoculars.util.ConfigFile.fromserial(config) space.config.command = command space.config.origin = 'server' space.metadata = binoculars.util.MetaData.fromserial(metadata) space.photons = photons space.contributions = contributions self.server.q.put((command, space)) class HiddenToolbar(NavigationToolbar2QT): def __init__(self, show_coords, update_sliders, canvas): NavigationToolbar2QT.__init__(self, canvas, None) self.show_coords = show_coords self.update_sliders = update_sliders self.zoom() self.threed = False def mouse_move(self, event): if not self.threed: self.show_coords(event) def press_zoom(self, event): super(HiddenToolbar, self).press_zoom(event) if not self.threed: self.inaxes = event.inaxes def release_zoom(self, event): super(HiddenToolbar, self).release_zoom(event) if not self.threed: self.update_sliders(self.inaxes) class ProjectWidget(QWidget): def __init__(self, filelist, key=None, projection=None, parent=None): super(ProjectWidget, self).__init__(parent) self.parent = parent self.figure = matplotlib.figure.Figure() self.canvas = FigureCanvasQTAgg(self.figure) self.toolbar = HiddenToolbar(self.show_coords, self.update_sliders, self.canvas) self.lin = QRadioButton('lin', self) self.lin.setChecked(False) self.lin.toggled.connect(self.plot) self.log = QRadioButton('log', self) self.log.setChecked(True) self.log.toggled.connect(self.plot) self.loglog = QRadioButton('loglog', self) self.loglog.setChecked(False) self.loglog.toggled.connect(self.plot) self.loggroup = QButtonGroup(self) self.loggroup.addButton(self.lin) self.loggroup.addButton(self.log) self.loggroup.addButton(self.loglog) self.swap_axes = QCheckBox('ax', self) self.swap_axes.setChecked(False) self.swap_axes.stateChanged.connect(self.plot) self.samerange = QCheckBox('same', self) self.samerange.setChecked(False) self.samerange.stateChanged.connect(self.update_colorbar) self.legend = QCheckBox('legend', self) self.legend.setChecked(True) self.legend.stateChanged.connect(self.plot) self.threed = QCheckBox('3d', self) self.threed.setChecked(False) self.threed.stateChanged.connect(self.plot) self.auto_update = QCheckBox('auto', self) self.auto_update.setChecked(True) self.datarange = 
RangeSlider(Qt.Horizontal) self.datarange.setMinimum(0) self.datarange.setMaximum(250) self.datarange.setLow(0) self.datarange.setHigh(self.datarange.maximum()) self.datarange.setTickPosition(QSlider.TicksBelow) self.datarange.sliderMoved.connect(self.update_colorbar) self.table = TableWidget(filelist) self.table.selectionError.connect(self.selectionerror) self.table.plotaxesChanged.connect(self.plotaxes_changed) self.key = key self.projection = projection self.button_save = QPushButton('save image') self.button_save.clicked.connect(self.save) self.button_refresh = QPushButton('refresh') self.button_refresh.clicked.connect(self.table.select) self.limitwidget = LimitWidget(self.table.plotaxes) self.limitwidget.keydict.connect(self.update_key) self.limitwidget.rangechange.connect(self.update_figure_range) self.initUI() self.table.select() def initUI(self): self.control_widget = QWidget(self) hbox = QHBoxLayout() left = QVBoxLayout() pushbox = QHBoxLayout() pushbox.addWidget(self.button_save) pushbox.addWidget(self.button_refresh) left.addLayout(pushbox) radiobox = QHBoxLayout() self.group = QButtonGroup(self) for label in ['stack', 'grid']: rb = QRadioButton(label, self.control_widget) rb.setChecked(True) self.group.addButton(rb) radiobox.addWidget(rb) radiobox.addWidget(self.lin) radiobox.addWidget(self.log) radiobox.addWidget(self.loglog) datarangebox = QHBoxLayout() datarangebox.addWidget(self.samerange) datarangebox.addWidget(self.legend) datarangebox.addWidget(self.threed) datarangebox.addWidget(self.swap_axes) datarangebox.addWidget(self.auto_update) left.addLayout(radiobox) left.addLayout(datarangebox) left.addWidget(self.datarange) left.addWidget(self.table) left.addWidget(self.limitwidget) self.control_widget.setLayout(left) splitter = QSplitter(Qt.Horizontal) splitter.addWidget(self.control_widget) splitter.addWidget(self.canvas) hbox.addWidget(splitter) self.setLayout(hbox) def show_coords(self, event): plotaxes = event.inaxes if hasattr(plotaxes, 'space'): if plotaxes.space.dimension == 2: labels = numpy.array([plotaxes.get_xlabel(), plotaxes.get_ylabel()]) order = [plotaxes.space.axes.index(label) for label in labels] labels = labels[order] coords = numpy.array([event.xdata, event.ydata])[order] try: rounded_coords = [ax[ax.get_index(coord)] for ax, coord in zip(plotaxes.space.axes, coords)] intensity = '{0:.2e}'.format(plotaxes.space[list(coords)]) self.parent.statusbar.showMessage('{0} = {1}, {2} = {3}, Intensity = {4}'.format(labels[0], rounded_coords[0], labels[1], rounded_coords[1], intensity)) except ValueError: self.parent.statusbar.showMessage('out of range') elif plotaxes.space.dimension == 1: xaxis = plotaxes.space.axes[plotaxes.space.axes.index(plotaxes.get_xlabel())] if event.xdata in xaxis: xcoord = xaxis[xaxis.get_index(event.xdata)] intensity = '{0:.2e}'.format(event.ydata) self.parent.statusbar.showMessage('{0} = {1}, Intensity = {2}'.format(xaxis.label, xcoord, intensity)) def update_sliders(self, plotaxes): if not plotaxes == None: space = plotaxes.space if hasattr(plotaxes, 'space'): if space.dimension == 2: labels = numpy.array([plotaxes.get_xlabel(), plotaxes.get_ylabel()]) limits = list(lim for lim in [plotaxes.get_xlim(), plotaxes.get_ylim()]) elif space.dimension == 1: labels = [plotaxes.get_xlabel()] limits = [plotaxes.get_xlim()] keydict = dict() for key, value in zip(labels, limits): keydict[key] = value self.limitwidget.update_from_zoom(keydict) def selectionerror(self, message): self.limitwidget.setDisabled(True) self.errormessage(message) def 
plotaxes_changed(self, plotaxes): self.limitwidget.setEnabled(True) self.limitwidget.axes_update(plotaxes) def update_key(self, input): self.key = input['key'] self.projection = input['project'] if len(self.limitwidget.sliders) - len(self.projection) == 1: self.datarange.setDisabled(True) self.samerange.setDisabled(True) self.swap_axes.setDisabled(True) self.loglog.setEnabled(True) elif len(self.limitwidget.sliders) - len(self.projection) == 2: self.loglog.setDisabled(True) self.datarange.setEnabled(True) self.samerange.setEnabled(True) self.swap_axes.setEnabled(True) self.plot() def get_norm(self, mi, ma): log = self.log.isChecked() rangemin = self.datarange.low() * 1.0 / self.datarange.maximum() rangemax = self.datarange.high() * 1.0 / self.datarange.maximum() if log: power = 3 vmin = mi + (ma - mi) * rangemin ** power vmax = mi + (ma - mi) * rangemax ** power else: vmin = mi + (ma - mi) * rangemin vmax = mi + (ma - mi) * rangemax if log: return matplotlib.colors.LogNorm(vmin, vmax) else: return matplotlib.colors.Normalize(vmin, vmax) def get_normlist(self): _log = self.log.isChecked() same = self.samerange.checkState() if same: return [self.get_norm(min(self.datamin), max(self.datamax))] * len(self.datamin) else: norm = [] for i in range(len(self.datamin)): norm.append(self.get_norm(self.datamin[i], self.datamax[i])) return norm def plot(self): if len(self.table.plotaxes) == 0: return self.figure.clear() self.parent.statusbar.clearMessage() self.figure_images = [] log = self.log.isChecked() loglog = self.loglog.isChecked() plotcount = len(self.table.selection) plotcolumns = int(numpy.ceil(numpy.sqrt(plotcount))) plotrows = int(numpy.ceil(float(plotcount) / plotcolumns)) plotoption = None if self.group.checkedButton(): plotoption = self.group.checkedButton().text() spaces = [] for i, filename in enumerate(self.table.selection): axes = self.table.getax(filename) rkey = axes.restricted_key(self.key) if rkey == None: space = self.table.getspace(filename) else: space = self.table.getspace(filename, rkey) projection = [ax for ax in self.projection if ax in space.axes] if projection: space = space.project(*projection) dimension = space.dimension if dimension == 0: self.errormessage('Choose suitable number of projections') if dimension == 3 and not self.threed.isChecked(): self.errormessage('Switch on 3D plotting, only works with small spaces') spaces.append(space) self.datamin = [] self.datamax = [] for space in spaces: data = space.get_masked().compressed() if log or loglog: data = data[data > 0] self.datamin.append(data.min()) self.datamax.append(data.max()) norm = self.get_normlist() if dimension == 1 or dimension == 2: self.toolbar.threed = False else: self.toolbar.threed = True for i, space in enumerate(spaces): filename = self.table.selection[i] basename = os.path.splitext(os.path.basename(filename))[0] if plotcount > 1: if dimension == 1 and (plotoption == 'stack' or plotoption == None): self.ax = self.figure.add_subplot(111) if dimension == 2 and plotoption != 'grid': sys.stderr.write('warning: stack display not supported for multi-file-plotting, falling back to grid\n') plotoption = 'grid' elif dimension > 3: sys.stderr.write('error: cannot display 4 or higher dimensional data, use --project or --slice to decrease dimensionality\n') sys.exit(1) else: self.ax = self.figure.add_subplot(111) if plotoption == 'grid': if dimension == 1 or dimension == 2: self.ax = self.figure.add_subplot(plotrows, plotcolumns, i+1) elif self.threed.isChecked(): self.ax = self.figure.gca(projection='3d') 
self.ax.set_title(basename) if dimension == 2 and self.swap_axes.checkState(): space = space.reorder(list(ax.label for ax in space.axes)[::-1]) self.ax.space = space im = binoculars.plot.plot(space, self.figure, self.ax, log=log, loglog=loglog, label=basename, norm=norm[i]) self.figure_images.append(im) if dimension == 1 and self.legend.checkState(): self.ax.legend() self.update_figure_range(self.key_to_str(self.key)) self.canvas.draw() def merge(self, filename): try: spaces = tuple(self.table.getspace(selected_filename) for selected_filename in self.table.selection) newspace = binoculars.space.sum(binoculars.space.make_compatible(spaces)) newspace.tofile(filename) list(map(self.table.remove, self.table.selection)) self.table.addspace(filename, True) except Exception as e: QMessageBox.critical(self, 'Merge', 'Unable to merge the meshes. {}'.format(e)) def subtractspace(self, filename): try: subtractspace = binoculars.space.Space.fromfile(filename) spaces = tuple(self.table.getspace(selected_filename) for selected_filename in self.table.selection) newspaces = tuple(space - subtractspace for space in spaces) for space, selected_filename in zip(newspaces, self.table.selection): newfilename = binoculars.util.find_unused_filename(selected_filename) space.tofile(newfilename) self.table.remove(selected_filename) self.table.addspace(newfilename, True) except Exception as e: QMessageBox.critical(self, 'Subtract', 'Unable to subtract the meshes. {}'.format(e)) def errormessage(self, message): self.figure.clear() self.canvas.draw() self.parent.statusbar.showMessage(message) def update_figure_range(self, key): if len(key) == 0: return for ax in self.figure.axes: plotaxes = self.table.plotaxes xlabel, ylabel = ax.get_xlabel(), ax.get_ylabel() if xlabel in plotaxes: xindex = plotaxes.index(xlabel) ax.set_xlim(key[xindex][0], key[xindex][1]) if ylabel in plotaxes: yindex = plotaxes.index(ylabel) ax.set_ylim(key[yindex][0], key[yindex][1]) self.canvas.draw() def update_colorbar(self, value): normlist = self.get_normlist() for im, norm in zip(self.figure_images, normlist): im.set_norm(norm) self.canvas.draw() @staticmethod def key_to_str(key): return list([s.start, s.stop] for s in key) @staticmethod def str_to_key(s): return tuple(slice(float(key[0]), float(key[1])) for key in s) def tofile(self, filename=None): dict = {} dict['filelist'] = self.table.filelist dict['key'] = self.key_to_str(self.key) dict['projection'] = self.projection if filename == None: filename = str(QFileDialog.getSaveFileName(self, 'Save Project', '.')) with open(filename, 'w') as fp: json.dump(dict, fp) @classmethod def fromfile(cls, filename=None, parent=None): if filename == None: filename = str(QFileDialog.getOpenFileName(cls, 'Open Project', '.', '*.proj')) try: with open(filename, 'r') as fp: dict = json.load(fp) except IOError as e: raise cls.error.showMessage("unable to open '{0}' as project file (original error: {1!r})".format(filename, e)) newlist = [] for fn in dict['filelist']: if not os.path.exists(fn): warningbox = QMessageBox(2, 'Warning', 'Cannot find space at path {0}; locate proper space'.format(fn), buttons=QMessageBox.Open) warningbox.exec_() newname = str(QFileDialog.getOpenFileName(caption='Open space {0}'.format(fn), directory='.', filter='*.hdf5')) newlist.append(newname) else: newlist.append(fn) widget = cls(newlist, cls.str_to_key(dict['key']), dict['projection'], parent=parent) return widget def addspace(self, filename=None, add=False): if filename == None: filename = str(QFileDialog.getOpenFileName(self, 
'Open Project', '.', '*.hdf5')) self.table.add_space(filename, add) def save(self): dialog = QFileDialog(self, "Save image") dialog.setNameFilters(['Portable Network Graphics (*.png)', 'Portable Document Format (*.pdf)']) dialog.setDefaultSuffix('png') dialog.setFileMode(QFileDialog.AnyFile) dialog.setAcceptMode(QFileDialog.AcceptSave) if not dialog.exec_(): return fname = dialog.selectedFiles()[0] if not fname: return try: self.figure.savefig(str(fname)) except Exception as e: QMessageBox.critical(self, 'Save image', 'Unable to save image to {}: {}'.format(fname, e)) def space_to_file(self, fname): ext = os.path.splitext(fname)[-1] for i, filename in enumerate(self.table.selection): axes = self.table.getax(filename) space = self.table.getspace(filename, key=axes.restricted_key(self.key)) projection = [ax for ax in self.projection if ax in space.axes] if projection: space = space.project(*projection) space.trim() outfile = binoculars.util.find_unused_filename(fname) if ext == '.edf': binoculars.util.space_to_edf(space, outfile) self.parent.statusbar.showMessage('saved at {0}'.format(outfile)) elif ext == '.txt': binoculars.util.space_to_txt(space, outfile) self.parent.statusbar.showMessage('saved at {0}'.format(outfile)) elif ext == '.hdf5': space.tofile(outfile) self.parent.statusbar.showMessage('saved at {0}'.format(outfile)) else: self.parent.statusbar.showMessage('unknown extension {0}, unable to save!\n'.format(ext)) def short_filename(filename): return filename.split('/')[-1].split('.')[0] class SpaceContainer(QTableWidgetItem): def __init__(self, label, space=None): super(SpaceContainer, self).__init__(short_filename(label)) self.label = label self.space = space def get_space(self, key=None): if self.space == None: return binoculars.space.Space.fromfile(self.label, key=key) else: if key == None: key = Ellipsis return self.space[key] def get_ax(self): if self.space == None: return binoculars.space.Axes.fromfile(self.label) else: return self.space.axes def add_to_space(self, space): if self.space is None: newspace = binoculars.space.Space.fromfile(self.label) + space newspace.tofile(self.label) else: self.space += space class TableWidget(QWidget): selectionError = pyqtSignal(str, name = 'Selection Error') plotaxesChanged = pyqtSignal(binoculars.space.Axes, name = 'plot axes changed') def __init__(self, filelist = [],parent=None): super(TableWidget, self).__init__(parent) hbox = QHBoxLayout() self.table = QTableWidget(0, 4) self.table.setHorizontalHeaderLabels(['', 'filename','labels', 'remove']) for index, width in enumerate([25,150,50,70]): self.table.setColumnWidth(index, width) for filename in filelist: self.add_space(filename) hbox.addWidget(self.table) self.setLayout(hbox) def add_space(self, filename, add = True, space = None): index = self.table.rowCount() self.table.insertRow(index) checkboxwidget = QCheckBox() checkboxwidget.setChecked(add) checkboxwidget.clicked.connect(self.select) self.table.setCellWidget(index,0, checkboxwidget) container = SpaceContainer(filename, space) self.table.setItem(index, 1, container) item = QTableWidgetItem(','.join(list(ax.label.lower() for ax in container.get_ax()))) self.table.setItem(index, 2, item) buttonwidget = QPushButton('remove') buttonwidget.clicked.connect(lambda: self.remove(filename)) self.table.setCellWidget(index,3, buttonwidget) if add: self.select() def addfromserver(self, command, space): if not command in self.filelist: self.add_space(command, add = False, space = space) else: container = 
self.table.item(self.filelist.index(command), 1) container.add_to_space(space) def remove(self, filename): self.table.removeRow(self.filelist.index(filename)) self.select() print(('removed: {0}'.format(filename))) def select(self): axes = self.plotaxes if len(axes) > 0: self.plotaxesChanged.emit(axes) else: self.selectionError.emit('no spaces selected or spaces with non identical labels selected') @property def selection(self): return list(container.label for checkbox, container in zip(self.itercheckbox(), self.itercontainer()) if checkbox.checkState()) @property def plotaxes(self): axes = tuple(container.get_ax() for checkbox, container in zip(self.itercheckbox(), self.itercontainer()) if checkbox.checkState()) if len(axes) > 0: try: return binoculars.space.Axes(binoculars.space.union_unequal_axes(ax) for ax in zip(*axes)) except ValueError: return () else: return () @property def filelist(self): return list(container.label for container in self.itercontainer()) def getax(self, filename): index = self.filelist.index(filename) return self.table.item(index, 1).get_ax() def getspace(self, filename, key = None): index = self.filelist.index(filename) return self.table.item(index, 1).get_space(key) def itercheckbox(self): return iter(self.table.cellWidget(index, 0) for index in range(self.table.rowCount())) def itercontainer(self): return iter(self.table.item(index, 1) for index in range(self.table.rowCount())) class LimitWidget(QWidget): keydict = pyqtSignal(dict, name = "keydict") rangechange = pyqtSignal(list, name = "rangechange") def __init__(self, axes, parent=None): super(LimitWidget, self).__init__(parent) self.initUI(axes) def initUI(self, axes): self.axes = axes self.sliders = list() self.qlabels = list() self.leftindicator = list() self.rightindicator = list() labels = list(ax.label for ax in axes) vbox = QVBoxLayout() hbox = QHBoxLayout() self.projectionlabel = QLabel(self) self.projectionlabel.setText('projection along axis') self.refreshbutton = QPushButton('all') self.refreshbutton.clicked.connect(self.refresh) vbox.addWidget(self.projectionlabel) self.checkbox = list() self.state = list() for label in labels: self.checkbox.append(QCheckBox(label, self)) for box in self.checkbox: self.state.append(box.checkState()) hbox.addWidget(box) box.stateChanged.connect(self.update_checkbox) self.state = numpy.array(self.state, dtype = numpy.bool) self.init_checkbox() vbox.addLayout(hbox) for label in labels: self.qlabels.append(QLabel(self)) self.leftindicator.append(QLineEdit(self)) self.rightindicator.append(QLineEdit(self)) self.sliders.append(RangeSlider(Qt.Horizontal)) for index, label in enumerate(labels): box = QHBoxLayout() box.addWidget(self.qlabels[index]) box.addWidget(self.leftindicator[index]) box.addWidget(self.sliders[index]) box.addWidget(self.rightindicator[index]) vbox.addLayout(box) for left in self.leftindicator: left.setMaximumWidth(50) for right in self.rightindicator: right.setMaximumWidth(50) for index, label in enumerate(labels): self.qlabels[index].setText(label) for index, ax in enumerate(axes): self.sliders[index].setMinimum(0) self.sliders[index].setMaximum(len(ax) - 1) self.sliders[index].setLow(0) self.sliders[index].setHigh(len(ax) - 1) self.sliders[index].setTickPosition(QSlider.TicksBelow) self.update_lines() for slider in self.sliders: slider.sliderMoved.connect(self.update_lines) for slider in self.sliders: slider.sliderReleased.connect(self.send_signal) for line in self.leftindicator: line.editingFinished.connect(self.update_sliders_left) 
line.editingFinished.connect(self.send_signal) for line in self.rightindicator: line.editingFinished.connect(self.update_sliders_right) line.editingFinished.connect(self.send_signal) vbox.addWidget(self.refreshbutton) if self.layout() == None: self.setLayout(vbox) def refresh(self): for slider in self.sliders: slider.setLow(slider.minimum()) slider.setHigh(slider.maximum()) self.update_lines() self.send_signal() def update_lines(self, value=0): for index, slider in enumerate(self.sliders): self.leftindicator[index].setText(str(self.axes[index][slider.low()])) self.rightindicator[index].setText(str(self.axes[index][slider.high()])) key = list((float(str(left.text())), float(str(right.text()))) for left, right in zip(self.leftindicator, self.rightindicator)) self.rangechange.emit(key) def send_signal(self): signal = {} key = ((float(str(left.text())), float(str(right.text()))) for left, right in zip(self.leftindicator, self.rightindicator)) key = [left if left == right else slice(left, right, None) for left, right in key] project = [] for ax, state in zip(self.axes, self.state): if state: project.append(ax.label) signal['project'] = project signal['key'] = key self.keydict.emit(signal) def update_sliders_left(self): for ax, left, right , slider in zip(self.axes, self.leftindicator, self.rightindicator, self.sliders): try: leftvalue = ax.get_index(float(str(left.text()))) rightvalue = ax.get_index(float(str(right.text()))) if leftvalue >= slider.minimum() and leftvalue < rightvalue: slider.setLow(leftvalue) else: slider.setLow(rightvalue - 1) except ValueError: slider.setLow(0) left.setText(str(ax[slider.low()])) def update_sliders_right(self): for ax, left, right, slider in zip(self.axes, self.leftindicator, self.rightindicator, self.sliders): leftvalue = ax.get_index(float(str(left.text()))) try: rightvalue = ax.get_index(float(str(right.text()))) if rightvalue <= slider.maximum() and rightvalue > leftvalue: slider.setHigh(rightvalue) else: slider.setHigh(leftvalue + 1) except ValueError: slider.setHigh(len(ax) - 1) right.setText(str(ax[slider.high()])) def update_checkbox(self): self.state = list() for box in self.checkbox: self.state.append(box.checkState()) self.send_signal() def init_checkbox(self): while numpy.alen(self.state) - self.state.sum() > 2: _index = numpy.where(self.state == False)[-1] self.state[-1] = True for box, state in zip(self.checkbox, self.state): box.setChecked(state) def axes_update(self, axes): if not set(ax.label for ax in self.axes) == set(ax.label for ax in axes): QWidget().setLayout(self.layout()) self.initUI(axes) self.send_signal() else: low = tuple(self.axes[index][slider.low()] for index, slider in enumerate(self.sliders)) high = tuple(self.axes[index][slider.high()] for index, slider in enumerate(self.sliders)) for index, ax in enumerate(axes): self.sliders[index].setMinimum(0) self.sliders[index].setMaximum(len(ax) - 1) self.axes = axes for index, slider in enumerate(self.sliders): self.leftindicator[index].setText(str(low[index])) self.rightindicator[index].setText(str(high[index])) self.update_sliders_left() self.update_sliders_right() self.send_signal() def update_from_zoom(self, keydict): for key in keydict: index = self.axes.index(key) self.leftindicator[index].setText(str(keydict[key][0])) self.rightindicator[index].setText(str(keydict[key][1])) self.update_sliders_left() self.update_sliders_right() self.send_signal() def is_empty(key): for k in key: if isinstance(k, slice): if k.start == k.stop: return True return False if __name__ == '__main__': 
app = QApplication(sys.argv) binoculars.space.silence_numpy_errors() main = Window() main.resize(1000, 600) main.newproject() main.show() sys.exit(app.exec_()) binoculars-0.0.4/scripts/binoculars-processgui000077500000000000000000000404501343276063200216240ustar00rootroot00000000000000#!/usr/bin/env python """ binoculars gui for data processing Created on 2015-06-04 author: Remy Nencib (remy.nencib@esrf.r) """ import sys import os import time from PyQt5.Qt import (Qt) # noqa from PyQt5.QtGui import (QColor, QPalette) from PyQt5.QtWidgets import (QAction, QApplication, QTabWidget, QFileDialog, QMessageBox, QPushButton, QHBoxLayout, QVBoxLayout, QSplitter, QTableWidgetItem, QTableWidget, QLabel, QLineEdit, QMainWindow, QWidget, QComboBox, QProgressDialog, QDockWidget) def set_src(): import os.path as osp dirpath = osp.join(osp.dirname(osp.abspath(__file__)), osp.pardir) sys.path.insert(0, osp.abspath(dirpath)) try: import binoculars.main import binoculars.util except ImportError: # try to use code from src distribution set_src() import binoculars.main import binoculars.util #--------------------------------------------CREATE MAIN WINDOW---------------------------------------- class Window(QMainWindow): def __init__(self): super(Window, self).__init__() self.initUI() self.tab_widget = QTabWidget(self) self.setCentralWidget(self.tab_widget) # add the close button for tabs self.tab_widget.setTabsClosable(True) self.tab_widget.tabCloseRequested.connect(self.close_tab) #method for close tabs def close_tab(self, tab): self.tab_widget.removeTab(tab) def initUI(self): #we create the menu bar openfile = QAction('Open', self) openfile.setShortcut('Ctrl+O') openfile.setStatusTip('Open new File') openfile.triggered.connect(self.ShowFile) savefile = QAction('Save', self) savefile.setShortcut('Ctrl+S') savefile.setStatusTip('Save File') savefile.triggered.connect(self.Save) create = QAction('Create', self) create.setStatusTip('Create Configfile') create.triggered.connect(self.New_Config) menubar = self.menuBar() filemenu = menubar.addMenu('&File') filemenu.addAction(openfile) filemenu.addAction(savefile) filemenu = menubar.addMenu('&New Configfile') filemenu.addAction(create) #we configue the main windows palette = QPalette() palette.setColor(QPalette.Background, Qt.gray) self.setPalette(palette) self.setGeometry(50, 100, 700, 700) self.setWindowTitle('Binoculars processgui') self.show() self.ListCommand = QTableWidget(1, 2, self) self.ListCommand.verticalHeader().setVisible(True) self.ListCommand.horizontalHeader().setVisible(False) self.ListCommand.horizontalHeader().stretchSectionCount() self.ListCommand.setColumnWidth(0, 80) self.ListCommand.setColumnWidth(1, 80) self.ListCommand.setRowCount(0) self.buttonDelete = QPushButton('Delete', self) self.buttonDelete.clicked.connect(self.removeConf) self.process = QPushButton('run', self) self.process.setStyleSheet("background-color: darkred") self.process.clicked.connect(self.run) self.wid = QWidget() self.CommandLayout = QVBoxLayout() self.CommandLayout.addWidget(self.ListCommand) self.CommandLayout.addWidget(self.process) self.CommandLayout.addWidget(self.buttonDelete) self.wid.setLayout(self.CommandLayout) self.Dock = QDockWidget() self.Dock.setAllowedAreas(Qt.LeftDockWidgetArea) self.Dock.setFeatures(QDockWidget.NoDockWidgetFeatures) self.Dock.setWidget(self.wid) self.Dock.setMaximumWidth(200) self.Dock.setMinimumWidth(200) self.addDockWidget(Qt.DockWidgetArea(1), self.Dock) def removeConf(self): self.ListCommand.removeRow(self.ListCommand.currentRow()) 
def Add_To_Liste(self, xxx_todo_changeme): (command, cfg) = xxx_todo_changeme row = self.ListCommand.rowCount() index = self.tab_widget.currentIndex() filename = self.tab_widget.tabText(index) self.ListCommand.insertRow(self.ListCommand.rowCount()) dic = {filename: cfg} self.item1 = QTableWidgetItem(str(command)) self.item1.command = command self.item2 = QTableWidgetItem(str(filename)) self.item2.cfg = dic[filename] self.ListCommand.setItem(row, 0, self.item1) self.ListCommand.setItem(row, 1, self.item2) #We run the script and create a hdf5 file def run(self): maximum = self.ListCommand.rowCount() pd = QProgressDialog('running', 'Cancel', 0, maximum, self) pd.setWindowModality(Qt.WindowModal) pd.show() def progress(cfg, command): if pd.wasCanceled(): raise KeyboardInterrupt QApplication.processEvents() return binoculars.main.Main.from_object(cfg, command) try: for index in range(self.ListCommand.rowCount()): pd.setValue(index) cfg = self.ListCommand.item(index, 1).cfg command = self.ListCommand.item(index, 0).command print(cfg) progress(cfg, command) self.ListCommand.clear() self.ListCommand.setRowCount(0) except BaseException as e: #cfg = self.ListCommand.item(index,1).cfg #print cfg QMessageBox.about(self, "Error", "There was an error processing one of the scans: {0}".format(e)) finally: pd.close() #we call the load function def ShowFile(self): filename = QFileDialog.getOpenFileName(self, 'Open File', '') confwidget = Conf_Tab(self) confwidget.read_data(str(filename)) newIndex = self.tab_widget.addTab(confwidget, os.path.basename(str(filename))) confwidget.command.connect(self.Add_To_Liste) self.tab_widget.setCurrentIndex(newIndex) #we call the save function def Save(self): filename = QFileDialog().getSaveFileName(self, 'Save', '', '*.txt') widget = self.tab_widget.currentWidget() widget.save(filename) #we call the new tab conf def New_Config(self): widget = Conf_Tab(self) self.tab_widget.addTab(widget, 'New configfile') widget.command.connect(self.Add_To_Liste) #---------------------------------------------------------------------------------------------------- #-----------------------------------------CREATE TABLE----------------------------------------------- class Table(QWidget): def __init__(self, label, parent=None): super(Table, self).__init__() # create a QTableWidget self.table = QTableWidget(1, 2, self) self.table.setHorizontalHeaderLabels(['Parameter', 'Value', 'Comment']) self.table.horizontalHeader().setStretchLastSection(True) self.table.verticalHeader().setVisible(False) self.table.setTextElideMode(Qt.ElideLeft) #create combobox self.combobox = QComboBox() #add items self.cell = QTableWidgetItem("type") self.table.setItem(0, 0, self.cell) self.table.setCellWidget(0, 1, self.combobox) #we create pushbuttons and we call the method when we clic on self.btn_add_row = QPushButton('+', self) self.btn_add_row.clicked.connect(self.add_row) self.buttonRemove = QPushButton('-', self) self.buttonRemove.clicked.connect(self.remove) #the dispositon of the table and the butttons vbox = QVBoxLayout() hbox = QHBoxLayout() hbox.addWidget(self.btn_add_row) hbox.addWidget(self.buttonRemove) vbox.addWidget(label) vbox.addLayout(hbox) vbox.addWidget(self.table) self.setLayout(vbox) def add_row(self): self.table.insertRow(self.table.rowCount()) def remove(self): self.table.removeRow(self.table.currentRow()) def get_keys(self): return list(str(self.table.item(index, 0).text()) for index in range(self.table.rowCount())) #Here we take all values from tables def getParam(self): for index in 
range(self.table.rowCount()): if not self.table.item == None: key = str(self.table.item(index, 0).text()) comment = str(self.table.item(index, 0).toolTip()) if index == 0: yield key, str(self.table.cellWidget(index, 1).currentText()), comment elif self.table.item(index, 1): if len(str(self.table.item(index, 1).text())) != 0 and self.table.item(index, 0).textColor() == QColor('black'): yield key, str(self.table.item(index, 1).text()), comment #Here we put all values in tables def addData(self, cfg): for item in cfg: if item == 'type': box = self.table.cellWidget(0, 1) value = cfg[item].split(':') if len(value) > 1: box.setCurrentIndex(box.findText(value[1], Qt.MatchFixedString)) else: box.setCurrentIndex(box.findText(cfg[item], Qt.MatchFixedString)) elif item not in self.get_keys(): self.add_row() row = self.table.rowCount() for col in range(self.table.columnCount()): if col == 0: newitem = QTableWidgetItem(item) self.table.setItem(row - 1, col, newitem) if col == 1: newitem2 = QTableWidgetItem(cfg[item]) self.table.setItem(row - 1, col, newitem2) else: index = self.get_keys().index(item) self.table.item(index, 1).setText(cfg[item]) def addDataConf(self, options): keys = self.get_keys() newconfigs = dict((option[0], '') for option in options if option[0] not in keys) self.addData(newconfigs) names = list(option[0] for option in options) for index, key in enumerate(self.get_keys()): if str(key) in names: self.table.item(index, 0).setTextColor(QColor('black')) self.table.item(index, 0).setToolTip(options[names.index(key)][1]) elif str(key) == 'type': self.table.item(index, 0).setTextColor(QColor('black')) else: self.table.item(index, 0).setTextColor(QColor('gray')) def add_to_combo(self, items): self.combobox.clear() self.combobox.addItems(items) #---------------------------------------------------------------------------------------------------- #-----------------------------------------CREATE CONFIG---------------------------------------------- class Conf_Tab(QWidget): def __init__(self, parent=None): super(Conf_Tab, self).__init__(parent) #we create 3 tables self.Dis = Table(QLabel('Dispatcher :')) self.Inp = Table(QLabel('Input :')) self.Pro = Table(QLabel('Projection :')) self.select = QComboBox() backends = list(backend.lower() for backend in binoculars.util.get_backends()) #we add the list of different backends on the select combobox self.select.addItems(backends) self.add = QPushButton('add') self.add.clicked.connect(self.AddCommand) self.scan = QLineEdit() self.scan.setToolTip('scan selection example: 820 824') vbox = QVBoxLayout() hbox = QHBoxLayout() splitter = QSplitter(Qt.Horizontal) splitter.addWidget(self.Dis) splitter.addWidget(self.Inp) splitter.addWidget(self.Pro) hbox.addWidget(splitter) commandbox = QHBoxLayout() commandbox.addWidget(self.add) commandbox.addWidget(self.scan) vbox.addWidget(self.select) vbox.addLayout(hbox) vbox.addLayout(commandbox) #the dispositon of all elements of the gui #Layout = QGridLayout() #Layout.addWidget(label1,1,1,1,2) #Layout.addWidget(label2,1,0,1,2) #Layout.addWidget(label3,1,2,1,2) #Layout.addWidget(self.select,0,0) #Layout.addWidget(self.Dis,2,1) #Layout.addWidget(self.Inp,2,0) #Layout.addWidget(self.Pro,2,2) #Layout.addWidget(self.add,3,0) #Layout.addWidget(self.scan,3,1) self.setLayout(vbox) #Here we call all methods for selected an ellement on differents combobox self.Dis.add_to_combo(binoculars.util.get_dispatchers()) self.select.activated['QString'].connect(self.DataCombo) self.Inp.combobox.activated.connect(self.DataTableInp) 
self.Pro.combobox.activated.connect(self.DataTableInpPro) self.Dis.combobox.activated.connect(self.DataTableInpDis) def DataCombo(self, text): self.Inp.add_to_combo(binoculars.util.get_inputs(str(text))) self.Pro.add_to_combo(binoculars.util.get_projections(str(text))) self.DataTableInp() self.DataTableInpPro() self.DataTableInpDis() def DataTableInp(self): backend = str(self.select.currentText()) inp = binoculars.util.get_input_configkeys(backend, str(self.Inp.combobox.currentText())) self.Inp.addDataConf(inp) def DataTableInpPro(self): backend = str(self.select.currentText()) proj = binoculars.util.get_projection_configkeys(backend, str(self.Pro.combobox.currentText())) self.Pro.addDataConf(proj) def DataTableInpDis(self): disp = binoculars.util.get_dispatcher_configkeys(str(self.Dis.combobox.currentText())) self.Dis.addDataConf(disp) #The save method we take all ellements on tables and we put them in this format {0} = {1} #{2} def save(self, filename): with open(filename, 'w') as fp: fp.write('[dispatcher]\n') # cycles over the iterator object for key, value, comment in self.Dis.getParam(): fp.write('{0} = {1} #{2}\n'.format(key, value, comment)) fp.write('[input]\n') for key, value, comment in self.Inp.getParam(): if key == 'type': value = '{0}:{1}'.format(self.select.currentText(), value) fp.write('{0} = {1} #{2}\n'.format(key, value, comment)) fp.write('[projection]\n') for key, value, comment in self.Pro.getParam(): if key == 'type': value = '{0}:{1}'.format(self.select.currentText(), value) fp.write('{0} = {1} #{2}\n'.format(key, value, comment)) #This method take the name of objects and values for run the script def get_configobj(self): inInp = {} inDis = {} inPro = {} inDis = dict((key, value) for key, value, comment in self.Dis.getParam()) for key, value, comment in self.Inp.getParam(): if key == 'type': value = '{0}:{1}'.format(str(self.select.currentText()).strip(), value) inInp[key] = value for key, value, comment in self.Pro.getParam(): if key == 'type': value = '{0}:{1}'.format(str(self.select.currentText()).strip(), value) inPro[key] = value cfg = binoculars.util.ConfigFile('processgui {0}'.format(time.strftime('%d %b %Y %H:%M:%S', time.localtime()))) setattr(cfg, 'input', inInp) setattr(cfg, 'dispatcher', inDis) setattr(cfg, 'projection', inPro) return cfg #This method take elements on a text file or the binocular script and put them on tables def read_data(self, filename): cfg = binoculars.util.ConfigFile.fromtxtfile(str(filename)) input_type = cfg.input['type'] backend, value = input_type.strip(' ').split(':') self.select.setCurrentIndex(self.select.findText(backend, Qt.MatchFixedString)) self.DataCombo(backend) self.Dis.addData(cfg.dispatcher) self.Inp.addData(cfg.input) self.Pro.addData(cfg.projection) #we add command on the DockWidget def AddCommand(self): scan = [str(self.scan.text())] cfg = self.get_configobj() commandconfig = (scan, cfg) self.command.emit(commandconfig) if __name__ == '__main__': app = QApplication(sys.argv) main = Window() main.show() sys.exit(app.exec_()) binoculars-0.0.4/scripts/binoculars-server000077500000000000000000000113501343276063200207440ustar00rootroot00000000000000#!/usr/bin/env python '''Serverqueue where jobs can be submitted. Jobs will be calculated on the spot or passed on to the OAR cluster if so specified in the configfile. Jobs can be submitted in a json dictionary. The keyword 'command' and 'configfilename' supply a string with the command and the path to the configfile. 
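A hypothetical job payload (the scan number, file path and override value below
are made up for illustration; any extra key whose value has the form
'section:key=value' is treated as a configuration override, see parse_job below)
could look like:

    >>> import json, socket
    >>> job = {'command': '820',
    ...        'configfilename': '/path/to/config.txt',
    ...        'override_1': 'projection:resolution=0.01'}
    >>> s = socket.create_connection(('localhost', 12345))  # host and port as printed by the server at startup
    >>> s.sendall(json.dumps(job).encode())
    >>> print(s.recv(1024))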
Everything else is assumed to be an override in the configfile. If an override
cannot be parsed, the job will start anyway without the override. The processing
queue cannot be interrupted.
'''

import socket
import threading
import time
import sys
import traceback
import json
import os

# python3 support
PY3 = sys.version_info > (3,)
if PY3:
    import socketserver
    import queue
else:
    import SocketServer as socketserver
    import Queue as queue


def set_src():
    import sys
    import os.path as osp
    dirpath = osp.join(osp.dirname(osp.abspath(__file__)), osp.pardir)
    sys.path.insert(0, osp.abspath(dirpath))

try:
    import binoculars.main
    import binoculars.util
except ImportError:
    # try to use code from src distribution
    set_src()
    import binoculars.main
    import binoculars.util


class ProcessTCPHandler(socketserver.BaseRequestHandler):
    def handle(self):
        input = self.request.recv(1024).decode()  # decode so the handler behaves identically under python 2 and 3
        if input.startswith('test'):
            print('Received test request')
            self.request.sendall(b'Connection successful')
        else:
            try:
                job = json.loads(input)
                parsed, result = parse_job(job)
                if parsed:
                    print('Received command: {0}. Job is added to queue.\nNumber of jobs left in queue: {1}'.format(job['command'], self.server.q.qsize()))
                    response = 'Job added to queue'
                    self.server.q.put(job)
                else:
                    response = result
            except:
                print('Could not parse the job: {0}'.format(input))
                print(traceback.format_exc())
                response = 'Error: Job could not be added to queue'
            finally:
                self.request.sendall(response.encode())  # sockets expect bytes under python 3


def parse_job(job):
    try:
        overrides = []
        for key in list(job.keys()):
            if key not in ['command', 'configfilename']:
                section_key, value = job[key].split('=')
                section, key = section_key.split(':')
                overrides.append((section, key, value))
        return True, overrides
    except:
        message = 'Error parsing the configuration options. {0}'.format(job)
        return False, message


def process(run_event, ip, port, q):
    while run_event.is_set():
        if q.empty():
            time.sleep(1)
        else:
            job = q.get()
            # assume everything in the list is an override except for command and configfilename
            command = str(job['command'])
            configfilename = job['configfilename']
            overrides = parse_job(job)[1]  # [1] holds the successfully parsed overrides
            print('Start processing: {0}'.format(command))
            try:
                configobj = binoculars.util.ConfigFile.fromtxtfile(configfilename, overrides=overrides)
                if binoculars.util.parse_bool(configobj.dispatcher['send_to_gui']):
                    configobj.dispatcher['host'] = ip
                    configobj.dispatcher['port'] = port
                binoculars.main.Main.from_object(configobj, [command])
                print('Successfully finished processing: {0}.'.format(command))
            except Exception as exc:
                errorfilename = 'error_{0}.txt'.format(command)
                print('An error occurred for scan {0}. For more information see {1}'.format(command, errorfilename))
                with open(errorfilename, 'w') as fp:
                    traceback.print_exc(file=fp)
            finally:
                print('Number of jobs left in queue: {0}'.format(q.qsize()))


if __name__ == '__main__':
    if len(sys.argv) > 1:
        ip = sys.argv[1]
        port = sys.argv[2]
    else:
        ip = None
        port = None
    q = queue.Queue()
    binoculars.util.register_python_executable(os.path.join(os.path.dirname(__file__), 'binoculars.py'))
    HOST, PORT = socket.gethostbyname(socket.gethostname()), 0
    run_event = threading.Event()
    run_event.set()
    process_thread = threading.Thread(target=process, args=(run_event, ip, port, q))
    process_thread.start()
    server = socketserver.TCPServer((HOST, PORT), ProcessTCPHandler)
    server.q = q
    ip, port = server.server_address
    print('Process server started running at ip {0} and port {1}.
Interrupt server with Ctrl-C'.format(ip, port)) try: server.serve_forever() except KeyboardInterrupt: run_event.clear() process_thread.join() binoculars-0.0.4/setup.py000066400000000000000000000040431343276063200153750ustar00rootroot00000000000000# -*- encoding: utf-8 -*- import os from setuptools import setup, find_packages description = ("Data reduction and analysis software for two-dimensional " "detectors in surface X-ray diffraction") long_description =\ """ BINoculars is a tool for data reduction and analysis of large sets of surface diffraction data that have been acquired with a two-dimensional X-ray detector. The intensity of each pixel of a two-dimensional detector is projected onto a three-dimensional grid in reciprocal-lattice coordinates using a binning algorithm. This allows for fast acquisition and processing of high-resolution data sets and results in a significant reduction of the size of the data set. The subsequent analysis then proceeds in reciprocal space. It has evolved from the specific needs of the ID03 beamline at the ESRF, but it has a modular design and can be easily adjusted and extended to work with data from other beamlines or from other measurement techniques.""" scripts = [os.path.join("scripts", d) for d in ["binoculars-fitaid", "binoculars-gui", "binoculars-processgui", "binoculars"]] install_requires = ['h5py', 'numpy', 'matplotlib', 'pyFAI', 'PyMca5', 'PyQt5'] setup(name='binoculars', version='0.0.1', description=description, long_description=long_description, packages=find_packages(exclude=["*.test", "*.test.*", "test.*", "test"]), install_requires=install_requires, scripts=scripts, author="Willem Onderwaater, Sander Roobol, Frédéric-Emmanuel Picca", author_email="onderwaa@esrf.fr, picca@synchrotron-soleil.fr", url='FIXME', license='GPL-3', classifiers=[ 'Topic :: Scientific/Engineering', 'Development Status :: 3 - Alpha', 'Operating System :: POSIX', 'Operating System :: Unix', 'Programming Language :: Python :: 2.7 3.5 3.6 3.7'] ) binoculars-0.0.4/test/000077500000000000000000000000001343276063200146415ustar00rootroot00000000000000binoculars-0.0.4/test/__init__.py000066400000000000000000000000001343276063200167400ustar00rootroot00000000000000binoculars-0.0.4/test/cfg.py000066400000000000000000000012471343276063200157560ustar00rootroot00000000000000import binoculars.util import os import unittest class TestCase(unittest.TestCase): def setUp(self): fn = 'examples/configs/example_config_id03' self.cfg = binoculars.util.ConfigFile.fromtxtfile(fn) def test_IO(self): self.cfg.totxtfile('test.txt') self.cfg.tofile('test.hdf5') print(binoculars.util.ConfigFile.fromfile('test.hdf5')) self.assertRaises(IOError, binoculars.util.ConfigFile.fromtxtfile, '') self.assertRaises(IOError, binoculars.util.ConfigFile.fromfile, '') def tearDown(self): os.remove('test.txt') os.remove('test.hdf5') if __name__ == '__main__': unittest.main() binoculars-0.0.4/test/id03.py000066400000000000000000000034201343276063200157510ustar00rootroot00000000000000from binoculars.backends import id03 import binoculars.util import binoculars.space import os import numpy import unittest class TestCase(unittest.TestCase): def setUp(self): cfg_unparsed = {} specfile = os.path.join(os.path.split(os.getcwd())[0], 'binoculars-binaries/examples/dataset/sixc_tutorial.spec' ) cfg_unparsed['specfile'] = specfile cfg_unparsed['sdd'] = '1000' cfg_unparsed['pixelsize'] = '0.055, 0.055' cfg_unparsed['imagefolder'] = specfile.replace('sixc_tutorial.spec', 'images') cfg_unparsed['centralpixel'] = '50 ,50' 
        numpy.save('mask.npy', numpy.identity(516))
        cfg_unparsed['maskmatrix'] = 'mask.npy'
        self.id03input = id03.EH2(cfg_unparsed)
        self.projection = id03.HKLProjection({'resolution': '0.01', 'limits': '[0:, :-1, 0:0.2]'})

    def test_IO(self):
        jobs = list(self.id03input.generate_jobs(['820']))
        destination_opts = self.id03input.get_destination_options(['820'])
        imagedata = self.id03input.process_job(jobs[0])
        intensity, weights, coords = next(imagedata)  # next() instead of .next() so the test also runs under python 3
        projected = self.projection.project(*coords)
        limits = self.projection.config.limits
        space1 = binoculars.space.Space.from_image(self.projection.config.resolution, self.projection.get_axis_labels(), projected, intensity, weights, limits=limits[0])
        print(space1)
        intensity, weights, coords = next(imagedata)
        projected = self.projection.project(*coords)
        space2 = binoculars.space.Space.from_image(self.projection.config.resolution, self.projection.get_axis_labels(), projected, intensity, weights)
        print(space1 + space2)

    def tearDown(self):
        os.remove('mask.npy')


if __name__ == '__main__':
    unittest.main()

binoculars-0.0.4/test/metadata.py

import binoculars.util
import binoculars.space
import os
import numpy
import unittest


class TestCase(unittest.TestCase):
    def setUp(self):
        fn = 'examples/configs/example_config_id03'
        self.cfg = binoculars.util.ConfigFile.fromtxtfile(fn)

    def test_IO(self):
        test = {'string': 'string', 'numpy.array': numpy.arange(10), 'list': range(10), 'tuple': tuple(range(10))}
        metasection = binoculars.util.MetaBase()
        metasection.add_section('first', test)
        print(metasection)
        metadata = binoculars.util.MetaData()
        metadata.add_dataset(metasection)
        metadata.add_dataset(self.cfg)
        metadata.tofile('test.hdf5')
        metadata += binoculars.util.MetaData.fromfile('test.hdf5')
        axis = tuple(binoculars.space.Axis(0, 10, 1, label) for label in ['h', 'k', 'l'])
        axes = binoculars.space.Axes(axis)
        space = binoculars.space.Space(axes)
        spacedict = dict(z for z in zip('abcde', range(5)))
        dataset = binoculars.util.MetaBase('fromspace', spacedict)
        space.metadata.add_dataset(dataset)
        space.tofile('test2.hdf5')
        testspace = binoculars.space.Space.fromfile('test2.hdf5')
        print((space + testspace).metadata)  # parenthesized so the metadata of the summed space is printed, not an attribute of print()'s return value
        print('--------------------------------------------------------')
        print(metadata)
        print(metadata.serialize())
        print(binoculars.util.MetaData.fromserial(metadata.serialize()))

    def tearDown(self):
        os.remove('test.hdf5')
        os.remove('test2.hdf5')


if __name__ == '__main__':
    unittest.main()