spykeutils-0.4.3/0000755000175000017500000000000012664623646012077 5ustar robrobspykeutils-0.4.3/spykeutils/0000755000175000017500000000000012664623646014313 5ustar robrobspykeutils-0.4.3/spykeutils/plugin/0000755000175000017500000000000012664623646015611 5ustar robrobspykeutils-0.4.3/spykeutils/plugin/data_provider_neo.py0000644000175000017500000011707212664623646021657 0ustar robrobimport os import sys from copy import copy from collections import OrderedDict import traceback import atexit import neo from data_provider import DataProvider from .. import conversions as convert class NeoDataProvider(DataProvider): """ Base class for data providers using NEO""" # Dictionary of block lists, indexed by (filename, block index) tuples loaded_blocks = {} # Dictionary of index in file, indexed by block object block_indices = {} # Dictionary of io, indexed by block object block_ios = {} # Dictionary of io (IO name, read paramters) tuples for loaded blocks block_read_params = {} # Mode for data lazy loading: # 0 - Full load # 1 - Lazy load # 2 - Caching lazy load data_lazy_mode = 0 # Mode for lazy cascade cascade_lazy = False # Forced IO class for all files. If None, determine by file extension. forced_io = None # Active IO read parameters (dictionary indexed by IO class) io_params = {} def __init__(self, name, progress): super(NeoDataProvider, self).__init__(name, progress) @classmethod def clear(cls): """ Clears cached blocks """ cls.loaded_blocks.clear() cls.block_indices.clear() cls.block_read_params.clear() ios = set() for io in cls.block_ios.itervalues(): if io in ios: continue if hasattr(io, 'close'): io.close() ios.add(io) cls.block_ios.clear() @classmethod def get_block(cls, filename, index, lazy=None, force_io=None, read_params=None): """ Return the block at the given index in the specified file. :param str filename: Path to the file from which to load the block. :param int index: The index of the block in the file. 
:param int lazy: Override global lazy setting if not ``None``: 0 regular load, 1 lazy load, 2 caching lazy load. :param force_io: Override global forced_io for the Neo IO class to use when loading the file. If ``None``, the global forced_io is used. :param dict read_params: Override read parameters for the IO that will load the block. If ``None``, the global io_params are used. """ if lazy is None: lazy = cls.data_lazy_mode > 0 else: lazy = lazy > 0 if force_io is None: force_io = cls.forced_io if filename in cls.loaded_blocks: return cls.loaded_blocks[filename][index] io, blocks = cls._load_neo_file(filename, lazy, force_io, read_params) if io and not lazy and not cls.cascade_lazy and hasattr(io, 'close'): io.close() if blocks is None: return None return blocks[index] @classmethod def get_blocks(cls, filename, lazy=None, force_io=None, read_params=None): """ Return a list of blocks loaded from the specified file :param str filename: Path to the file from which to load the blocks. :param int lazy: Override global lazy setting if not ``None``: 0 regular load, 1 lazy load, 2 caching lazy load. :param force_io: Override global forced_io for the Neo IO class to use when loading the file. If ``None``, the global forced_io is used. :param dict read_params: Override read parameters for the IO that will load the block. If ``None``, the global io_params are used. """ if lazy is None: lazy = cls.data_lazy_mode > 0 else: lazy = lazy > 0 if force_io is None: force_io = cls.forced_io if filename in cls.loaded_blocks: return cls.loaded_blocks[filename] io, blocks = cls._load_neo_file(filename, lazy, force_io, read_params) if io and not lazy and not cls.cascade_lazy and hasattr(io, 'close'): io.close() return blocks @classmethod def _load_neo_file(cls, filename, lazy, force_io, read_params): """ Returns a NEO io object and a list of contained blocks for a file name. This function also caches all loaded blocks :param str filename: The full path of the file (relative or absolute). 
:param bool lazy: Determines if lazy mode is used for Neo io. :param force_io: IO class to use for loading. If None, determined by file extension or through trial and error for directories. :param dict read_params: Override read parameters for the IO that will load the block. If ``None``, the global io_params are used. """ cascade = 'lazy' if cls.cascade_lazy else True if os.path.isdir(filename): if force_io: try: n_io = force_io(filename) if read_params is None: rp = cls.io_params.get(force_io, {}) else: rp = read_params content = n_io.read(lazy=lazy, cascade=cascade, **rp) if force_io == neo.TdtIO and \ isinstance(content, neo.Block) and \ not content.segments: # TdtIO can produce empty blocks for invalid dirs sys.stderr.write( 'Could not load any blocks from "%s"' % filename) return None, None return cls._content_loaded( content, filename, lazy, n_io, rp) except Exception, e: sys.stderr.write( 'Load error for directory "%s":\n' % filename) tb = sys.exc_info()[2] while not ('self' in tb.tb_frame.f_locals and tb.tb_frame.f_locals['self'] == n_io): if tb.tb_next is not None: tb = tb.tb_next else: break traceback.print_exception(type(e), e, tb) else: for io in neo.io.iolist: if io.mode == 'dir': try: n_io = io(filename) if read_params is None: rp = cls.io_params.get(force_io, {}) else: rp = read_params content = n_io.read(lazy=lazy, cascade=cascade, **rp) if io == neo.TdtIO and \ isinstance(content, neo.Block) and \ not content.segments: # TdtIO can produce empty blocks for invalid dirs continue return cls._content_loaded( content, filename, lazy, n_io, rp) except Exception, e: sys.stderr.write( 'Load error for directory "%s":\n' % filename) tb = sys.exc_info()[2] while not ('self' in tb.tb_frame.f_locals and tb.tb_frame.f_locals['self'] == n_io): if tb.tb_next is not None: tb = tb.tb_next else: break traceback.print_exception(type(e), e, tb) else: if force_io: if read_params is None: rp = cls.io_params.get(force_io, {}) else: rp = read_params return 
cls._load_file_with_io(filename, force_io, lazy, rp) extension = filename.split('.')[-1] for io in neo.io.iolist: if extension in io.extensions: if read_params is None: rp = cls.io_params.get(io, {}) else: rp = read_params return cls._load_file_with_io(filename, io, lazy, rp) return None, None @classmethod def _content_loaded(cls, content, filename, lazy, n_io, read_params): if isinstance(content, neo.Block): # Neo 0.2.1 cls.block_indices[content] = 0 cls.loaded_blocks[filename] = [content] cls.block_read_params[content] = (type(n_io).__name__, read_params) if lazy or cls.cascade_lazy: cls.block_ios[content] = n_io return n_io, [content] # Neo >= 0.3.0, read() returns a list of blocks blocks = content for i, b in enumerate(blocks): cls.block_indices[b] = i cls.block_read_params[b] = (type(n_io).__name__, read_params) if lazy or cls.cascade_lazy: cls.block_ios[b] = n_io cls.loaded_blocks[filename] = blocks return n_io, blocks @classmethod def _load_file_with_io(cls, filename, io, lazy, read_params): if io == neo.NeoHdf5IO: # Fix unicode problem with pyinstaller if hasattr(sys, 'frozen'): filename = filename.encode('UTF-8') n_io = io(filename=filename) if read_params is None: rp = cls.io_params.get(io, {}) else: rp = read_params try: cascade = 'lazy' if cls.cascade_lazy else True if hasattr(io, 'read_all_blocks'): # Neo 0.2.1 content = n_io.read_all_blocks(lazy=lazy, cascade=cascade, **rp) else: content = n_io.read(lazy=lazy, cascade=cascade, **rp) return cls._content_loaded(content, filename, lazy, n_io, rp) except Exception, e: sys.stderr.write( 'Load error for file "%s":\n' % filename) tb = sys.exc_info()[2] while not ('self' in tb.tb_frame.f_locals and tb.tb_frame.f_locals['self'] == n_io): if tb.tb_next is not None: tb = tb.tb_next else: break traceback.print_exception(type(e), e, tb) return None, None @classmethod def _get_data_from_viewer(cls, viewer): """ Return a dictionary with selection information from viewer """ # The links in this data format are based 
list indices data = {} data['type'] = 'Neo' # Block entry: (Index of block in file, file location of block, # block IO class name, block IO read parameters) block_list = [] block_indices = {} selected_blocks = viewer.neo_blocks() block_files = viewer.neo_block_file_names() for b in selected_blocks: block_indices[b] = len(block_list) block_list.append([NeoDataProvider.block_indices[b], block_files[b], cls.block_read_params[b][0], cls.block_read_params[b][1]]) data['blocks'] = block_list # Recording channel group entry: # (Index of rcg in block, index of block) rcg_list = [] rcg_indices = {} selected_rcg = viewer.neo_channel_groups() for rcg in selected_rcg: rcg_indices[rcg] = len(rcg_list) idx = rcg.block.recordingchannelgroups.index(rcg) rcg_list.append([idx, block_indices[rcg.block]]) data['channel_groups'] = rcg_list # Recording channel entry: (Index of channel in rcg, index of rcg) # There can be multiple channel entries for one channel object, if # it is part of multiple channel groups channel_list = [] selected_channels = viewer.neo_channels() for c in selected_channels: for rcg in c.recordingchannelgroups: if rcg in rcg_indices: idx = rcg.recordingchannels.index(c) channel_list.append([idx, rcg_indices[rcg]]) data['channels'] = channel_list # Segment entry: (Index of segment in block, index of block) segment_list = [] segment_indices = {} selected_segments = viewer.neo_segments() for s in selected_segments: segment_indices[s] = len(segment_list) idx = s.block.segments.index(s) segment_list.append([idx, block_indices[s.block]]) data['segments'] = segment_list # Unit entry: (Index of unit in rcg, index of rcg) unit_list = [] selected_units = viewer.neo_units() for u in selected_units: segment_indices[u] = len(segment_list) rcg_id = None if u.recordingchannelgroup is None \ else u.recordingchannelgroup.units.index(u) rcg = rcg_indices[u.recordingchannelgroup] \ if u.recordingchannelgroup else None unit_list.append([rcg_id, rcg]) data['units'] = unit_list return 
data @staticmethod def find_io_class(name): """ Return the Neo IO class with a given name. :param str name: Class name of the desired IO class. """ for io in neo.io.iolist: if io.__name__ == name: return io return None def _active_block(self, old): """ Return a copy of all selected elements in the given block. Only container objects are copied, data objects are linked. Needs to load all lazily loaded objects and will cache them regardless of current lazy_mode, """ block = copy(old) block.segments = [] selected_segments = set(self.segments() + [None]) selected_rcgs = set(self.recording_channel_groups() + [None]) selected_channels = set(self.recording_channels() + [None]) selected_units = set(self.units() + [None]) for s in old.segments: if s in selected_segments: segment = copy(s) segment.analogsignals = [self._load_lazy_object(sig, True) for sig in s.analogsignals if sig.recordingchannel in selected_channels] segment.analogsignalarrays = [ self._load_lazy_object(asa, True) for asa in s.analogsignalarrays if asa.recordingchannelgroup in selected_rcgs] segment.irregularlysampledsignals = [ self._load_lazy_object(iss, True) for iss in s.irregularlysampledsignals if iss.recordingchannel in selected_channels] segment.spikes = [self._load_lazy_object(sp, True) for sp in s.spikes if sp.unit in selected_units] segment.spiketrains = [self._load_lazy_object(st, True) for st in s.spiketrains if st.unit in selected_units] segment.block = block block.segments.append(segment) block.recordingchannelgroups = [] for old_rcg in old.recordingchannelgroups: if old_rcg in selected_rcgs: rcg = copy(old_rcg) rcg.analogsignalarrays = [ self._load_lazy_object(asa, True) for asa in old_rcg.analogsignalarrays if asa.segment in selected_segments] rcg.recordingchannels = [] for c in old_rcg.recordingchannels: if not c in selected_channels: continue channel = copy(c) channel.analogsignals = [ self._load_lazy_object(sig, True) for sig in c.analogsignals if sig.segment in selected_segments] 
channel.irregularlysampledsignals = [ self._load_lazy_object(iss, True) for iss in c.irregularlysampledsignals if iss.segment in selected_segments] channel.recordingchannelgroups = copy( c.recordingchannelgroups) channel.recordingchannelgroups.insert( channel.recordingchannelgroups.index(old_rcg), rcg) channel.recordingchannelgroups.remove(old_rcg) rcg.recordingchannels.append(channel) rcg.units = [] for u in old_rcg.units: if not u in selected_units: continue unit = copy(u) unit.spikes = [self._load_lazy_object(sp, True) for sp in u.spikes if sp.segment in selected_segments] unit.spiketrains = [self._load_lazy_object(st, True) for st in u.spiketrains if st.segment in selected_segments] unit.recordingchannelgroup = rcg rcg.units.append(unit) rcg.block = block block.recordingchannelgroups.append(rcg) return block def _get_object_io(self, o): """ Find the IO for an object. Return ``None`` if no IO exists. """ if o.segment: return self.block_ios.get(o.segment.block, None) if hasattr(object, 'recordingchannelgroups'): if o.recordingchannelgroups: return self.block_ios.get( o.recordingchannelgroups[0].block, None) if hasattr(object, 'recordingchannel'): c = o.recordingchannel if c.recordingchannelgroups: return self.block_ios.get( c.recordingchannelgroups[0].block, None) return None def _load_lazy_object(self, o, change_links=False): """ Return a loaded version of a lazily loaded object. The IO needs a ``read_lazy_object`` that takes a lazily loaded data object as parameter method for this to work. :param o: The object to load. :param bool change_links: If ``True``, replace the old object in the hierarchy. 
""" if not hasattr(o, 'lazy_shape'): return o io = self._get_object_io(o) if io: if hasattr(io, 'load_lazy_object'): ret = io.load_lazy_object(o) elif isinstance(io, neo.io.NeoHdf5IO): ret = io.get(o.hdf5_path, cascade=False, lazy=False) else: return o ret.segment = o.segment if hasattr(o, 'recordingchannelgroup'): ret.recordingchannelgroup = o.recordingchannelgroup elif hasattr(o, 'recordingchannel'): ret.recordingchannel = o.recordingchannel elif hasattr(o, 'unit'): ret.unit = o.unit if change_links: name = type(o).__name__.lower() + 's' l = getattr(o.segment, name) try: l[l.index(o)] = ret except ValueError: l.append(ret) l = None if hasattr(o, 'recordingchannelgroup'): l = getattr(o.recordingchannelgroup, name) elif hasattr(o, 'recordingchannel'): l = getattr(o.recordingchannel, name) elif hasattr(o, 'unit'): l = getattr(o.unit, name) if l is not None: try: l[l.index(o)] = ret except ValueError: l.append(ret) return ret return o def _load_object_list(self, objects): """ Return a list of loaded objects for a list of (potentially) lazily loaded objects. """ ret = [] for o in objects: ret.append(self._load_lazy_object(o, self.data_lazy_mode > 1)) return ret def _load_object_dict(self, objects): """ Return a dictionary (without changing indices) of loaded objects for a dictionary of (potentially) lazily loaded objects. """ for k, v in objects.items(): if isinstance(v, list): objects[k] = self._load_object_list(v) elif isinstance(v, dict): for ik, iv in v.items(): v[ik] = self._load_lazy_object(iv, self.data_lazy_mode > 1) else: raise ValueError( 'Only dicts or lists are supported as dictionary values!') return objects def selection_blocks(self): """ Return a list of selected blocks. """ return [self._active_block(b) for b in self.blocks()] def spike_trains(self): """ Return a list of :class:`neo.core.SpikeTrain` objects. 
""" trains = [] units = set(self.units()) for s in self.segments(): trains.extend([t for t in s.spiketrains if t.unit in units or t.unit is None]) for u in self.units(): trains.extend([t for t in u.spiketrains if t.segment is None]) return self._load_object_list(trains) def spike_trains_by_unit(self): """ Return a dictionary (indexed by Unit) of lists of :class:`neo.core.SpikeTrain` objects. """ trains = OrderedDict() segments = set(self.segments()) for u in self.units(): st = [t for t in u.spiketrains if t.segment in segments or t.segment is None] if st: trains[u] = st nonetrains = [] for s in self.segments(): nonetrains.extend([t for t in s.spiketrains if t.unit is None]) if nonetrains: trains[self.no_unit] = nonetrains return self._load_object_dict(trains) def spike_trains_by_segment(self): """ Return a dictionary (indexed by Segment) of lists of :class:`neo.core.SpikeTrain` objects. """ trains = OrderedDict() units = self.units() for s in self.segments(): st = [t for t in s.spiketrains if t.unit in units or t.unit is None] if st: trains[s] = st nonetrains = [] for u in self.units(): nonetrains.extend([t for t in u.spiketrains if t.segment is None]) if nonetrains: trains[self.no_segment] = nonetrains return self._load_object_dict(trains) def spike_trains_by_unit_and_segment(self): """ Return a dictionary (indexed by Unit) of dictionaries (indexed by Segment) of :class:`neo.core.SpikeTrain` objects. 
""" trains = OrderedDict() segments = self.segments() for u in self.units(): for s in segments: segtrains = [t for t in u.spiketrains if t.segment == s] if segtrains: if u not in trains: trains[u] = OrderedDict() trains[u][s] = segtrains[0] nonetrains = [t for t in u.spiketrains if t.segment is None] if nonetrains: if u not in trains: trains[u] = OrderedDict() trains[u][self.no_segment] = nonetrains[0] nonetrains = OrderedDict() for s in self.segments(): segtrains = [t for t in s.spiketrains if t.unit is None] if segtrains: nonetrains[s] = segtrains[0] if nonetrains: trains[self.no_unit] = nonetrains return self._load_object_dict(trains) def spikes(self): """ Return a list of :class:`neo.core.Spike` objects. """ spikes = [] units = self.units() for s in self.segments(): spikes.extend([t for t in s.spikes if t.unit in units or t.unit is None]) for u in self.units(): spikes.extend([t for t in u.spikes if t.segment is None]) return self._load_object_list(spikes) def spikes_by_unit(self): """ Return a dictionary (indexed by Unit) of lists of :class:`neo.core.Spike` objects. """ spikes = OrderedDict() segments = self.segments() for u in self.units(): sp = [t for t in u.spikes if t.segment in segments or t.segment is None] if sp: spikes[u] = sp nonespikes = [] for s in self.segments(): nonespikes.extend([t for t in s.spikes if t.unit is None]) if nonespikes: spikes[self.no_unit] = nonespikes return self._load_object_dict(spikes) def spikes_by_segment(self): """ Return a dictionary (indexed by Segment) of lists of :class:`neo.core.Spike` objects. 
""" spikes = OrderedDict() units = self.units() for s in self.segments(): sp = [t for t in s.spikes if t.unit in units or t.unit is None] if sp: spikes[s] = sp nonespikes = [] for u in self.units(): nonespikes.extend([t for t in u.spikes if t.segment is None]) if nonespikes: spikes[self.no_segment] = nonespikes return self._load_object_dict(spikes) def spikes_by_unit_and_segment(self): """ Return a dictionary (indexed by Unit) of dictionaries (indexed by Segment) of :class:`neo.core.Spike` lists. """ spikes = OrderedDict() segments = self.segments() for u in self.units(): for s in segments: segtrains = [t for t in u.spikes if t.segment == s] if segtrains: if u not in spikes: spikes[u] = OrderedDict() spikes[u][s] = segtrains nonespikes = [t for t in u.spikes if t.segment is None] if nonespikes: if u not in spikes: spikes[u] = OrderedDict() spikes[u][self.no_segment] = nonespikes nonespikes = OrderedDict() for s in self.segments(): segspikes = [t for t in s.spikes if t.unit is None] if segspikes: nonespikes[s] = segspikes if nonespikes: spikes[self.no_unit] = nonespikes return self._load_object_dict(spikes) def events(self, include_array_events=True): """ Return a dictionary (indexed by Segment) of lists of Event objects. """ ret = OrderedDict() for s in self.segments(): if s.events: ret[s] = s.events if include_array_events: for a in s.eventarrays: if s not in ret: ret[s] = [] ret[s].extend(convert.event_array_to_events(a)) return ret def labeled_events(self, label, include_array_events=True): """ Return a dictionary (indexed by Segment) of lists of Event objects with the given label. 
""" ret = OrderedDict() for s in self.segments(): events = [e for e in s.events if e.label == label] if events: ret[s] = events if include_array_events: for a in s.eventarrays: if s not in ret: ret[s] = [] events = convert.event_array_to_events(a) ret[s].extend((e for e in events if e.label == label)) return ret def event_arrays(self): """ Return a dictionary (indexed by Segment) of lists of EventArray objects. """ ret = OrderedDict() for s in self.segments(): if s.eventarrays: ret[s] = s.eventarrays return self._load_object_dict(ret) def epochs(self, include_array_epochs=True): """ Return a dictionary (indexed by Segment) of lists of Epoch objects. """ ret = OrderedDict() for s in self.segments(): if s.epochs: ret[s] = s.epochs if include_array_epochs: for a in s.epocharrays: if s not in ret: ret[s] = [] ret[s].extend(convert.epoch_array_to_epochs(a)) return ret def labeled_epochs(self, label, include_array_epochs=True): """ Return a dictionary (indexed by Segment) of lists of Epoch objects with the given label. """ ret = OrderedDict() for s in self.segments(): epochs = [e for e in s.epochs if e.label == label] if epochs: ret[s] = epochs if include_array_epochs: for a in s.epocharrays: if s not in ret: ret[s] = [] epochs = convert.epoch_array_to_epochs(a) ret[s].extend((e for e in epochs if e.label == label)) return ret def epoch_arrays(self): """ Return a dictionary (indexed by Segment) of lists of EpochArray objects. """ ret = OrderedDict() for s in self.segments(): if s.epocharrays: ret[s] = s.epocharrays return self._load_object_dict(ret) def analog_signals(self, conversion_mode=1): """ Return a list of :class:`neo.core.AnalogSignal` objects. 
""" signals = [] channels = self.recording_channels() if conversion_mode == 1 or conversion_mode == 3: for s in self.segments(): signals.extend([t for t in s.analogsignals if t.recordingchannel in channels or t.recordingchannel is None]) for u in self.recording_channels(): signals.extend([t for t in u.analogsignals if t.segment is None]) if conversion_mode > 1: for sa in self.analog_signal_arrays(): for sig in convert.analog_signal_array_to_analog_signals(sa): if (sig.recordingchannel is None or sig.recordingchannel in channels): signals.append(sig) return self._load_object_list(signals) def analog_signals_by_segment(self, conversion_mode=1): """ Return a dictionary (indexed by Segment) of lists of :class:`neo.core.AnalogSignal` objects. """ signals = OrderedDict() channels = self.recording_channels() if conversion_mode == 1 or conversion_mode == 3: for s in self.segments(): sig = [] for c in channels: sig.extend([t for t in c.analogsignals if t.segment == s]) sig.extend([t for t in s.analogsignals if t.recordingchannel is None]) if sig: signals[s] = sig nonesignals = [] for c in channels: nonesignals.extend([t for t in c.analogsignals if t.segment is None]) if nonesignals: signals[self.no_segment] = nonesignals if conversion_mode > 1: for o, sa_list in \ self.analog_signal_arrays_by_segment().iteritems(): for sa in sa_list: for sig in \ convert.analog_signal_array_to_analog_signals(sa): if sig.recordingchannel is None or \ sig.recordingchannel in channels: if o not in signals: signals[o] = [] signals[o].append(sig) return self._load_object_dict(signals) def analog_signals_by_channel(self, conversion_mode=1): """ Return a dictionary (indexed by RecordingChannel) of lists of :class:`neo.core.AnalogSignal` objects. 
""" signals = OrderedDict() channels = self.recording_channels() if conversion_mode == 1 or conversion_mode == 3: segments = self.segments() for c in channels: sig = [t for t in c.analogsignals if t.segment in segments or t.segment is None] if sig: signals[c] = sig nonesignals = [] for s in segments: nonesignals.extend([t for t in s.analogsignals if t.recordingchannel is None]) if nonesignals: signals[self.no_channel] = nonesignals if conversion_mode > 1: for o, sa_list in \ self.analog_signal_arrays_by_channelgroup().iteritems(): for sa in sa_list: for sig in \ convert.analog_signal_array_to_analog_signals(sa): if sig.recordingchannel is None: if self.no_channel not in signals: signals[self.no_channel] = [sig] else: signals[self.no_channel].append(sig) elif sig.recordingchannel in channels: if sig.recordingchannel not in signals: signals[sig.recordingchannel] = [sig] else: signals[sig.recordingchannel].append(sig) return self._load_object_dict(signals) def analog_signals_by_channel_and_segment(self, conversion_mode=1): """ Return a dictionary (indexed by RecordingChannel) of dictionaries (indexed by Segment) of :class:`neo.core.AnalogSignal` lists. 
""" signals = OrderedDict() channels = self.recording_channels() if conversion_mode == 1 or conversion_mode == 3: segments = self.segments() for c in channels: for s in segments: segsignals = [t for t in c.analogsignals if t.segment == s] if segsignals: if c not in signals: signals[c] = OrderedDict() signals[c][s] = segsignals nonesignals = [t for t in c.analogsignals if t.segment is None] if nonesignals: if c not in signals: signals[c] = OrderedDict() signals[c][self.no_segment] = nonesignals nonesignals = OrderedDict() for s in self.segments(): segsignals = [t for t in s.analogsignals if t.recordingchannel is None] if segsignals: nonesignals[s] = segsignals if nonesignals: signals[self.no_channel] = nonesignals if conversion_mode > 1: sigs = self.analog_signal_arrays_by_channelgroup_and_segment() for cg, inner in sigs.iteritems(): for seg, sa_list in inner.iteritems(): for sa in sa_list: for sig in convert.analog_signal_array_to_analog_signals(sa): chan = sig.recordingchannel if chan not in channels: continue if chan not in signals: signals[chan] = OrderedDict() if seg not in signals[chan]: signals[chan][seg] = [] signals[chan][seg].append(sig) return self._load_object_dict(signals) def analog_signal_arrays(self): """ Return a list of :class:`neo.core.AnalogSignalArray` objects. """ signals = [] channelgroups = self.recording_channel_groups() for s in self.segments(): signals.extend([t for t in s.analogsignalarrays if t.recordingchannelgroup in channelgroups or t.recordingchannelgroup is None]) for u in channelgroups: signals.extend([t for t in u.analogsignalarrays if t.segment is None]) return self._load_object_list(signals) def analog_signal_arrays_by_segment(self): """ Return a dictionary (indexed by Segment) of lists of :class:`neo.core.AnalogSignalArray` objects. 
""" signals = OrderedDict() channelgroups = self.recording_channel_groups() for s in self.segments(): sa = [] for c in channelgroups: sa.extend([t for t in c.analogsignalarrays if t.segment == s]) sa.extend([t for t in s.analogsignalarrays if t.recordingchannelgroup is None]) if sa: signals[s] = sa nonesignals = [] for c in channelgroups: nonesignals.extend([t for t in c.analogsignalarrays if t.segment is None]) if nonesignals: signals[self.no_segment] = nonesignals return self._load_object_dict(signals) def analog_signal_arrays_by_channelgroup(self): """ Return a dictionary (indexed by RecordingChannelGroup) of lists of :class:`neo.core.AnalogSignalArray` objects. """ signals = OrderedDict() segments = self.segments() for c in self.recording_channel_groups(): sa = [t for t in c.analogsignalarrays if t.segment in segments] if sa: signals[c] = sa nonesignals = [] for s in segments: nonesignals.extend([t for t in s.analogsignalarrays if t.recordingchannelgroup is None]) if nonesignals: signals[self.no_channelgroup] = nonesignals return self._load_object_dict(signals) def analog_signal_arrays_by_channelgroup_and_segment(self): """ Return a dictionary (indexed by RecordingChannelGroup) of dictionaries (indexed by Segment) of :class:`neo.core.AnalogSignalArray` lists. 
""" signals = OrderedDict() segments = self.segments() for c in self.recording_channel_groups(): for s in segments: segsignals = [t for t in c.analogsignalarrays if t.segment == s] if segsignals: if c not in signals: signals[c] = OrderedDict() signals[c][s] = segsignals nonesignals = [t for t in c.analogsignalarrays if t.segment is None] if nonesignals: if c not in signals: signals[c] = OrderedDict() signals[c][self.no_segment] = nonesignals nonesignals = OrderedDict() for s in self.segments(): segsignals = [t for t in s.analogsignalarrays if t.recordingchannelgroup is None] if segsignals: nonesignals[s] = segsignals if nonesignals: signals[self.no_channelgroup] = nonesignals return self._load_object_dict(signals) atexit.register(NeoDataProvider.clear)spykeutils-0.4.3/spykeutils/plugin/io_plugin.py0000644000175000017500000000242512664623646020153 0ustar robrobimport traceback import inspect import neo from neo.io.baseio import BaseIO from .. import SpykeException def load_from_file(path): """ Load IO plugins from a Python file. Inserts the loaded plugins into the neo.iolist. :param str path: The path to the file to search for IO plugins. """ f = open(path) load_from_string(f.read(), path) def load_from_string(code, path=''): """ Load IO plugins from Python code. Inserts the loaded plugins into the neo.iolist. :param str code: The IO plugin code. :param str path: The path for the IO plugin. """ exc_globals = {} try: exec(code, exc_globals) except Exception: raise SpykeException('Error during execution of ' + 'potential Neo IO file ' + path + ':\n' + traceback.format_exc() + '\n') for cl in exc_globals.values(): if not inspect.isclass(cl): continue # Should be a subclass of BaseIO... 
if not issubclass(cl, BaseIO): continue # but should not be BaseIO (can happen when directly imported) if cl == BaseIO: continue cl._is_spyke_plugin = True cl._python_file = path neo.io.iolist.insert(0, cl)spykeutils-0.4.3/spykeutils/plugin/analysis_plugin.py0000644000175000017500000004005612664623646021371 0ustar robrobimport hashlib import json import os import tables import time import gui_data class HashEntry(tables.IsDescription): hash = tables.StringCol(32) filename = tables.StringCol(992) # 1024-32 -> long filenames are possible class AnalysisPlugin(gui_data.DataSet): """ Base class for Analysis plugins. Inherit this class to create a plugin. The two most important methods are :func:`get_name` and :func:`start`. Both should be overridden by every plugin. The class also has functionality for GUI configuration and saving/restoring analysis results. The GUI configuration uses :mod:`guidata`. Because `AnalysisPlugin` inherits from `DataSet`, configuration options can easily be added directly to the class definition. For example, the following code creates an analysis that has two configuration options which are used in the start() method to print to the console:: from spykeutils.plugin import analysis_plugin, gui_data class SamplePlugin(analysis_plugin.AnalysisPlugin): some_time = gui_data.FloatItem('Some time', default=2.0, unit='ms') print_more = gui_data.BoolItem('Print additional info', default=True) def start(self, current, selections): print 'The selected time is', self.some_time, 'milliseconds.' if self.print_more: print 'This is important additional information!' The class attribute ``data_dir`` contains a base directory for saving and loading data. It is set by Spyke Viewer to the directory specified in the settings. When using an AnalysisPlugin without Spyke Viewer, the default value is an empty string (so the current directory will be used) and the attribute can be set to an arbitrary directory. 
""" data_dir = '' def __init__(self): super(AnalysisPlugin, self).__init__() def get_name(self): """ Return the name of an analysis. Override to specify analysis name. :returns: The name of the plugin. :rtype: str """ return 'Prototype Plugin' def get_title(self): # Override guidata.DataSet.get_title() return self.get_name() def get_comment(self): # Override guidata.DataSet.get_comment() ret = None if self.__doc__: string = self.__doc__ if not isinstance(string, basestring): string = unicode(string) if not isinstance(string, unicode): string = unicode(string, 'utf-8') doc_lines = string.splitlines() # Remove empty lines at the begining of comment while doc_lines and not doc_lines[0].strip(): del doc_lines[0] if doc_lines: ret = "\n".join([x.strip() for x in doc_lines]) return ret def start(self, current, selections): """ Entry point for processing. Override with analysis code. :param current: This data provider is used if the analysis should be performed on the data currently selected in the GUI. :type current: :class:`spykeviewer.plugin_framework.data_provider.DataProvider` :param list selections: This parameter contains all saved selections. It is used if an analysis needs multiple data sets. """ pass def configure(self): """ Configure the analysis. Override if a different or additional configuration apart from guidata is needed. """ if self._items: return self.edit() def get_parameters(self): """ Return a dictionary of the configuration that can be read with :func:`deserialize_parameters`. Override both if non-guidata attributes need to be serialized or if some guidata parameters should not be serialized (e.g. they only affect the visual presentation). :returns: A dictionary of all configuration parameters. 
    def _get_hash(self, selections, params, use_guiparams):
        """ Return the MD5 hash for a parameter configuration and the
        three strings it was computed from.

        :param sequence selections: :class:`DataProvider` objects whose
            serialized form is part of the hash.
        :param dict params: Additional (non-GUI) parameters, or a falsy
            value if there are none.
        :param bool use_guiparams: If ``True``, the guidata parameters of
            the plugin are included in the hash.
        :returns: A tuple ``(hash, guidata_string, selection_string,
            param_string)``. The last three elements are the exact strings
            that were concatenated (in this order) to produce the hash.
        """
        # repr() of the sorted item list gives a canonical textual form that
        # does not depend on dictionary iteration order.
        if use_guiparams:
            guidata_string = repr(sorted(self.get_parameters().items()))
        else:
            guidata_string = ''
        selection_string = json.dumps([s.data_dict() for s in selections])
        if params:
            param_string = repr(sorted(params.items()))
        else:
            param_string = ''
        md5 = hashlib.md5()
        # Concatenation order matters: save() and load() both rely on this
        # exact composition to recognize matching result files.
        hash_string = guidata_string + selection_string + param_string
        md5.update(hash_string)
        return md5.hexdigest(), guidata_string, selection_string, param_string
:returns: An open PyTables file object ready to be used to store data. Afterwards, the file has to be closed by calling the :func:`tables.File.close` method. :rtype: :class:`tables.File` """ if not selections: selections = [] if not os.path.exists(os.path.join(self.data_dir, name)): os.makedirs(os.path.join(self.data_dir, name)) if params is None: params = {} # Use unicode parameters for n, v in params.iteritems(): if isinstance(v, str): params[n] = unicode(v) # Create parameter hash hash_, guidata_string, selection_string, param_string = \ self._get_hash(selections, params, save_guiparams) # File name is current time stamp time_stamp = time.strftime("%Y%m%d-%H%M%S") file_name_base = os.path.join(self.data_dir, name, time_stamp) file_name = file_name_base # Make sure not to overwrite another file i = 2 while os.path.exists(file_name): file_name = file_name_base + '_%d' % i i += 1 file_name += '.h5' self._add_hash_lookup_entry(name, hash_, file_name) h5 = tables.openFile(file_name, 'w') # Save guidata parameters paramgroup = h5.createGroup('/', 'guiparams') if save_guiparams: guiparams = self.get_parameters() for p, v in guiparams.iteritems(): t = type(v) if t == int or t == float: h5.setNodeAttr(paramgroup, p, v) else: h5.setNodeAttr(paramgroup, p, json.dumps(v)) # Save selections the provided by plugin h5.setNodeAttr('/', 'selections', selection_string) # Save additional parameters provided by plugin paramgroup = h5.createGroup('/', 'userparams') for p, v in params.iteritems(): t = type(v) if t == int or t == float: h5.setNodeAttr(paramgroup, p, v) else: h5.setNodeAttr(paramgroup, p, json.dumps(v)) # Save hash and current time h5.setNodeAttr('/', '_hash', hash_) h5.setNodeAttr('/', 'time', time.time()) return h5 def load(self, name, selections, params=None, consider_guiparams=True): """ Return the most recent HDF5 file for a certain parameter configuration. If no such file exists, return None. This function works with the files created by :func:`save`. 
:param str name: The name of the results to load. :param sequence selections: A list of :class:`DataProvider` objects that are relevant for the analysis results. :param dict params: A dictionary, indexed by strings (which should be valid as python identifiers), with parameters apart from GUI configuration used to obtain the results. All keys have to be integers, floats, strings or lists of these types. :param bool consider_guiparams: Determines if the guidata parameters of the class should be considered if they exist in the HDF5 file. This should be set to False if :func:`save` is used with ``save_guiparams`` set to ``False``. :returns: An open PyTables file object ready to be used to read data. Afterwards, the file has to be closed by calling the :func:`tables.File.close` method. If no appropriate file exists, None is returned. :rtype: :class:`tables.File` """ if not selections: selections = [] if not os.path.exists(os.path.join(self.data_dir, name)): return None if params is None: params = {} # Use unicode parameters for n, v in params.iteritems(): if isinstance(v, str): params[n] = unicode(v) hash_, guidata_string, selection_string, param_string =\ self._get_hash(selections, params, consider_guiparams) # Loop through files and find the most recent match file_names = self._get_hash_file_names(name, hash_) newest = 0.0 best = None for fn in file_names: with tables.openFile(fn, 'r') as h5: file_hash = h5.getNodeAttr('/', '_hash') if hash_ != file_hash: continue # Hash is correct, check guidata parameters gui_params = {} for pname in h5.root.guiparams._v_attrs._f_list('user'): v = h5.getNodeAttr('/guiparams', pname) if isinstance(v, str): gui_params[pname] = json.loads(v) else: gui_params[pname] = v if gui_params: gui_param_string = repr(sorted(gui_params.items())) else: gui_param_string = '' if gui_param_string != guidata_string: continue # Check selections file_selections = h5.getNodeAttr('/', 'selections') if file_selections != selection_string: continue # Check 
custom parameters file_params = {} for pname in h5.root.userparams._v_attrs._f_list('user'): v = h5.getNodeAttr('/userparams', pname) if isinstance(v, str): file_params[pname] = json.loads(v) else: file_params[pname] = v if file_params: file_param_string = repr(sorted(file_params.items())) else: file_param_string = '' if file_param_string != param_string: continue # Make sure the most recent file is used analysis_time = h5.getNodeAttr('/', 'time') if analysis_time < newest: continue best = fn newest = analysis_time if best: return tables.openFile(best, 'r') return None @classmethod def _create_hash_lookup_file(cls, name): """ (Re)creates a hash lookup file for a results directory. This file contains all file hashes in the directory so that the correct file for a given parameter set can be found quickly. :param str name: The name of the results. """ name = os.path.join(cls.data_dir, name) hashfile_name = os.path.join(name, 'hash.h5') hash_file = tables.openFile(hashfile_name, mode='w') table = hash_file.createTable('/', 'lookup_table', HashEntry, title='Hash lookup') # Loop through files and write hashes file_names = [os.path.join(name, f) for f in os.listdir(name)] entry = table.row for fn in file_names: if not fn.endswith('.h5') or fn == 'hash.h5': continue try: with tables.openFile(fn, 'r') as h5: file_hash = h5.getNodeAttr('/', '_hash') entry['hash'] = file_hash entry['filename'] = fn entry.append() except: pass # Not a valid data file, no problem hash_file.close() @classmethod def _add_hash_lookup_entry(cls, name, hash_, file_name): """ Add a new entry to the hash lookup file. :param str name: The name of the results. :param str hash_: The hash of the parameters. :param str file_name: The file name of the results. 
""" hashfile_name = os.path.join(cls.data_dir, name, 'hash.h5') if not os.path.exists(hashfile_name): cls._create_hash_lookup_file(name) hash_file = tables.openFile(hashfile_name, mode='r+') table = hash_file.root.lookup_table # Add entry entry = table.row entry['hash'] = hash_ entry['filename'] = file_name entry.append() hash_file.close() @classmethod def _get_hash_file_names(cls, name, hash_, _recurse=False): """ Return a list of file names for a parameter hash. If no hash lookup file exists, it will be created. If it can not be created, a list HDF5 files in the directory will be returned. :param str name: The name of the results. :param str hash_: The hash of the parameters. :param bool _recurse: Internal guard against infinite recursion. """ dataname = name name = os.path.join(cls.data_dir, name) hashfile_name = os.path.join(name, 'hash.h5') if not os.path.exists(hashfile_name): try: cls._create_hash_lookup_file(name) except: return [os.path.join(name, f) for f in os.listdir(name) if f.endswith('.h5') and not f == 'hash.h5'] hash_file = tables.openFile(hashfile_name, mode='r') table = hash_file.root.lookup_table files = [row['filename'] for row in table.where('hash == "%s"' % hash_)] ret = [] for f in files: if os.path.exists(f): ret.append(f) elif not _recurse: hash_file.close() try: cls._create_hash_lookup_file(name) except: return [os.path.join(name, f) for f in os.listdir(name) if f.endswith('.h5') and not f == 'hash.h5'] return cls._get_hash_file_names(dataname, hash_, True) hash_file.close() return retspykeutils-0.4.3/spykeutils/plugin/gui_data.py0000644000175000017500000002320412664623646017741 0ustar robrob""" This module gives access to all members of :mod:`guidata.dataset.dataitems` and :mod:`guidata.dataset.datatypes`. If :mod:`guidata` cannot be imported, the module offers suitable dummy objects instead (e.g. for use on a server). 
""" try: from guidata.dataset.dataitems import * from guidata.dataset.datatypes import * except ImportError, e: import datetime import scipy as sp # datatypes dummies class DataItem(object): def __init__(self, *args, **kwargs): self._default = None if 'default' in kwargs: self._default = kwargs['default'] def get_prop(self, realm, name, default=None): pass def get_prop_value(self, realm, instance, name, default=None): pass def set_prop(self, realm, **kwargs): return self def set_pos(self, col=0, colspan=None): return self def get_help(self, instance): pass def get_auto_help(self, instance): pass def format_string(self, instance, value, fmt, func): pass def get_string_value(self, instance): pass def set_name(self, new_name): self._name = new_name def set_from_string(self, instance, string_value): pass def set_default(self, instance): self.__set__(instance, self._default) def accept(self, visitor): pass def __set__(self, instance, value): setattr(instance, "_"+self._name, value) def __get__(self, instance, klass): if instance is not None: return getattr(instance, "_"+self._name, self._default) else: return self def get_value(self, instance): return self.__get__(instance, instance.__class__) def check_item(self, instance): pass def check_value(self, instance, value): pass def from_string(self, instance, string_value): pass def bind(self, instance): return DataItemVariable(self, instance) def serialize(self, instance, writer): pass def deserialize(self, instance, reader): pass class DataSetMeta(type): """ DataSet metaclass. Create class attribute `_items`: list of the DataSet class attributes. Also make sure that all data items have the correct `_name` attribute. 
""" def __new__(mcs, name, bases, dct): items = [] for base in bases: if getattr(base, "__metaclass__", None) is DataSetMeta: for item in base._items: items.append(item) for attrname, value in dct.items(): if isinstance(value, DataItem): value.set_name(attrname) items.append(value) dct["_items"] = items return type.__new__(mcs, name, bases, dct) class DataSet(object): __metaclass__ = DataSetMeta def __init__(self, title=None, comment=None, icon=''): self.set_defaults() def _get_translation(self): pass def _compute_title_and_comment(self): pass def get_title(self): pass def get_comment(self): pass def get_icon(self): pass def set_defaults(self): for item in self._items: item.set_default(self) def check(self): pass def text_edit(self): pass def edit(self, parent=None, apply=None): pass def view(self, parent=None): pass def to_string(self, debug=False, indent=None, align=False): pass def accept(self, vis): pass def serialize(self, writer): pass def deserialize(self, reader): pass def read_config(self, conf, section, option): pass def write_config(self, conf, section, option): pass @classmethod def set_global_prop(cls, realm, **kwargs): pass class ItemProperty(object): def __init__(self, callable=None): pass def __call__(self, instance, item, value): pass def set(self, instance, item, value): pass class FormatProp(ItemProperty): def __init__(self, fmt, ignore_error=True): pass class GetAttrProp(ItemProperty): pass class ValueProp(ItemProperty): pass class NotProp(ItemProperty): pass class DataItemVariable(object): def __init__(self, item, instance): self.item = item self.instance = instance def get_prop_value(self, realm, name, default=None): pass def get_prop(self, realm, name, default=None): pass def get_help(self): pass def get_auto_help(self): pass def get_string_value(self): pass def set_default(self): return self.item.set_default(self.instance) def get(self): return self.item.get_value(self.instance) def set(self, value): return self.item.__set__(self.instance, 
value) def set_from_string(self, string_value): pass def check_item(self): pass def check_value(self, value): pass def from_string(self, string_value): pass def label(self): pass class GroupItem(DataItem): pass class BeginGroup(DataItem): pass class EndGroup(DataItem): pass class TabGroupItem(GroupItem): pass class BeginTabGroup(BeginGroup): def get_group(self): pass class EndTabGroup(EndGroup): pass # dataitems dummies class NumericTypeItem(DataItem): def __init__(self, *args, **kwargs): super(NumericTypeItem, self).__init__(*args, **kwargs) class FloatItem(NumericTypeItem): def __init__(self, *args, **kwargs): super(FloatItem, self).__init__(*args, **kwargs) if self._default is None: self._default = float() class IntItem(NumericTypeItem): def __init__(self, *args, **kwargs): super(IntItem, self).__init__(*args, **kwargs) if self._default is None: self._default = int() class StringItem(DataItem): def __init__(self, *args, **kwargs): super(StringItem, self).__init__(*args, **kwargs) if self._default is None: self._default = str() class BoolItem(DataItem): def __init__(self, *args, **kwargs): super(BoolItem, self).__init__(*args, **kwargs) if self._default is None: self._default = bool() class DateItem(DataItem): def __init__(self, *args, **kwargs): super(DateItem, self).__init__(*args, **kwargs) if self._default is None: self._default = datetime.date.today() class DateTimeItem(DateItem): def __init__(self, *args, **kwargs): super(DateTimeItem, self).__init__(*args, **kwargs) if self._default is None: self._default = datetime.datetime.now() class ColorItem(StringItem): def __init__(self, *args, **kwargs): super(ColorItem, self).__init__(*args, **kwargs) if self._default is None: self._default = str() class FileSaveItem(StringItem): def __init__(self, *args, **kwargs): super(FileSaveItem, self).__init__(*args, **kwargs) class FileOpenItem(FileSaveItem): def __init__(self, *args, **kwargs): super(FileOpenItem, self).__init__(*args, **kwargs) class 
FilesOpenItem(FileSaveItem): def __init__(self, *args, **kwargs): super(FilesOpenItem, self).__init__(*args, **kwargs) if self._default is None: self._default = list() class DirectoryItem(StringItem): def __init__(self, *args, **kwargs): super(DirectoryItem, self).__init__(*args, **kwargs) class ChoiceItem(DataItem): def __init__(self, *args, **kwargs): super(ChoiceItem, self).__init__(*args, **kwargs) if self._default is None: self._default = int() class MultipleChoiceItem(ChoiceItem): def __init__(self, *args, **kwargs): super(MultipleChoiceItem, self).__init__(*args, **kwargs) if self._default is None: self._default = list() class ImageChoiceItem(ChoiceItem): def __init__(self, *args, **kwargs): super(ImageChoiceItem, self).__init__(*args, **kwargs) class FloatArrayItem(DataItem): def __init__(self, *args, **kwargs): super(FloatArrayItem, self).__init__(*args, **kwargs) if self._default is None: self._default = sp.array([]) class ButtonItem(DataItem): def __init__(self, *args, **kwargs): super(ButtonItem, self).__init__(*args, **kwargs) class DictItem(ButtonItem): def __init__(self, *args, **kwargs): super(DictItem, self).__init__(*args, **kwargs) if self._default is None: self._default = dict() class FontFamilyItem(StringItem): def __init__(self, *args, **kwargs): super(FontFamilyItem, self).__init__(*args, **kwargs) if self._default is None: self._default = str()spykeutils-0.4.3/spykeutils/plugin/data_provider.py0000644000175000017500000003626412664623646021021 0ustar robrobimport neo class DataProvider(object): """ Defines all methods that should be implemented by a selection/data provider class. A `DataProvider` encapsulates access to a selection of data. It can be used by plugins to acesss data currently selected in the GUI or in saved selections. 
It also contains an attribute `progress`, a :class:`spykeutils.progress_indicator.ProgressIndicator` that can be used to report the progress of an operation (and is used by methods of this class if they can lead to processing times of half a second or more). This class serves as an abstract base class and should not be instantiated.""" _factories = {} no_unit = neo.Unit(name='No Unit') no_segment = neo.Segment(name='No segment') no_channel = neo.RecordingChannel(name='No recording channel') no_channelgroup = neo.RecordingChannelGroup(name='No recording channel group') no_unit.annotate(unique_id=-1) no_segment.annotate(unique_id=-1) no_channel.annotate(unique_id=-1) no_channelgroup.annotate(unique_id=-1) def __init__(self, name, progress): self.name = name self.progress = progress def _invert_indices(self, dictionary): """ Invert the indices of a dictionary of dictionaries. """ dict_type = type(dictionary) ret = dict_type() for i1 in dictionary: for i2 in dictionary[i1]: if not i2 in ret: ret[i2] = dict_type() ret[i2][i1] = dictionary[i1][i2] return ret def blocks(self): """ Return a list of selected Block objects. The returned objects will contain all regular references, not just to selected objects. """ return [] def segments(self): """ Return a list of selected Segment objects. The returned objects will contain all regular references, not just to selected objects. """ return [] def recording_channel_groups(self): """ Return a list of selected RecordingChannelGroup objects. The returned objects will contain all regular references, not just to selected objects. """ return [] def recording_channels(self): """ Return a list of selected RecordingChannel objects. The returned objects will contain all regular references, not just to selected objects. """ return [] def units(self): """ Return a list of selected Unit objects. The returned objects will contain all regular references, not just to selected objects. 
""" return [] def selection_blocks(self): """ Return a list of selected blocks. The returned blocks will contain references to all other selected elements further down in the object hierarchy, but no references to elements which are not selected. The returned hierarchy is a copy, so changes made to it will not persist. The main purpose of this function is to provide an object hierarchy that can be saved to a neo file. It is not recommended to use it for data processing, the respective functions that return objects lower in the hierarchy are better suited for that purpose. """ return [] def spike_trains(self): """ Return a list of :class:`neo.core.SpikeTrain` objects. """ return [] def spike_trains_by_unit(self): """ Return a dictionary (indexed by Unit) of lists of :class:`neo.core.SpikeTrain` objects. If spike trains not attached to a Unit are selected, their dicionary key will be ``DataProvider.no_unit``. """ return {} def spike_trains_by_segment(self): """ Return a dictionary (indexed by Segment) of lists of :class:`neo.core.SpikeTrain` objects. If spike trains not attached to a Segment are selected, their dictionary key will be ``DataProvider.no_segment``. """ return {} def spike_trains_by_unit_and_segment(self): """ Return a dictionary (indexed by Unit) of dictionaries (indexed by Segment) of :class:`neo.core.SpikeTrain` objects. If there are multiple spike trains in one Segment for the same Unit, only the first will be contained in the returned dictionary. If spike trains not attached to a Unit or Segment are selected, their dictionary key will be ``DataProvider.no_unit`` or ``DataProvider.no_segment``, respectively. """ return {} def spike_trains_by_segment_and_unit(self): """ Return a dictionary (indexed by Unit) of dictionaries (indexed by Segment) of :class:`neo.core.SpikeTrain` objects. If there are multiple spike trains in one Segment for the same Unit, only the first will be contained in the returned dictionary. 
If spike trains not attached to a Unit or Segment are selected, their dictionary key will be ``DataProvider.no_unit`` or ``DataProvider.no_segment``, respectively. """ return self._invert_indices(self.spike_trains_by_unit_and_segment()) def spikes(self): """ Return a list of :class:`neo.core.Spike` objects. """ return [] def spikes_by_unit(self): """ Return a dictionary (indexed by Unit) of lists of :class:`neo.core.Spike` objects. If spikes not attached to a Unit are selected, their dicionary key will be ``DataProvider.no_unit``. """ return {} def spikes_by_segment(self): """ Return a dictionary (indexed by Segment) of lists of :class:`neo.core.Spike` objects. If spikes not attached to a Segment are selected, their dictionary key will be ``DataProvider.no_segment``. """ return {} def spikes_by_unit_and_segment(self): """ Return a dictionary (indexed by Unit) of dictionaries (indexed by Segment) of :class:`neo.core.Spike` lists. If there are multiple spikes in one Segment for the same Unit, only the first will be contained in the returned dictionary. If spikes not attached to a Unit or Segment are selected, their dictionary key will be ``DataProvider.no_unit`` or ``DataProvider.no_segment``, respectively. """ return {} def spikes_by_segment_and_unit(self): """ Return a dictionary (indexed by Segment) of dictionaries (indexed by Unit) of lists of :class:`neo.core.Spike` lists. If spikes not attached to a Unit or Segment are selected, their dictionary key will be ``DataProvider.no_unit`` or ``DataProvider.no_segment``, respectively. """ return self._invert_indices(self.spikes_by_unit_and_segment()) def events(self, include_array_events = True): """ Return a dictionary (indexed by Segment) of lists of Event objects. :param bool include_array_events: Determines if EventArray objects should be converted to Event objects and included in the returned list. 
""" return {} def labeled_events(self, label, include_array_events = True): """ Return a dictionary (indexed by Segment) of lists of Event objects with the given label. :param str label: The name of the Event objects to be returnded :param bool include_array_events: Determines if EventArray objects should be converted to Event objects and included in the returned list. """ return [] def event_arrays(self): """ Return a dictionary (indexed by Segment) of lists of EventArray objects. """ return {} def epochs(self, include_array_epochs = True): """ Return a dictionary (indexed by Segment) of lists of Epoch objects. :param bool include_array_epochs: Determines if EpochArray objects should be converted to Epoch objects and included in the returned list. """ return {} def labeled_epochs(self, label, include_array_epochs = True): """ Return a dictionary (indexed by Segment) of lists of Epoch objects with the given label. :param str label: The name of the Epoch objects to be returnded :param bool include_array_epochs: Determines if EpochArray objects should be converted to Epoch objects and included in the returned list. """ return [] def epoch_arrays(self): """ Return a dictionary (indexed by Segment) of lists of EpochArray objects. """ return {} def analog_signals(self, conversion_mode=1): """ Return a list of :class:`neo.core.AnalogSignal` objects. :param int conversion_mode: Determines what signals are returned: 1. AnalogSignal objects only 2. AnalogSignal objects extracted from AnalogSignalArrays only 3. Both AnalogSignal objects and extracted AnalogSignalArrays """ return [] def analog_signals_by_segment(self, conversion_mode=1): """ Return a dictionary (indexed by Segment) of lists of :class:`neo.core.AnalogSignal` objects. If analog signals not attached to a Segment are selected, their dictionary key will be ``DataProvider.no_segment``. :param int conversion_mode: Determines what signals are returned: 1. AnalogSignal objects only 2. 
AnalogSignal objects extracted from AnalogSignalArrays only 3. Both AnalogSignal objects and extracted AnalogSignalArrays """ return {} def analog_signals_by_channel(self, conversion_mode=1): """ Return a dictionary (indexed by RecordingChannel) of lists of :class:`neo.core.AnalogSignal` objects. If analog signals not attached to a RecordingChannel are selected, their dictionary key will be ``DataProvider.no_channel``. :param int conversion_mode: Determines what signals are returned: 1. AnalogSignal objects only 2. AnalogSignal objects extracted from AnalogSignalArrays only 3. Both AnalogSignal objects and extracted AnalogSignalArrays """ return {} def analog_signals_by_channel_and_segment(self, conversion_mode=1): """ Return a dictionary (indexed by RecordingChannel) of dictionaries (indexed by Segment) of :class:`neo.core.AnalogSignal` lists. If analog signals not attached to a Segment or RecordingChannel are selected, their dictionary key will be ``DataProvider.no_segment`` or ``DataProvider.no_channel``, respectively. :param int conversion_mode: Determines what signals are returned: 1. AnalogSignal objects only 2. AnalogSignal objects extracted from AnalogSignalArrays only 3. Both AnalogSignal objects and extracted AnalogSignalArrays """ return {} def analog_signals_by_segment_and_channel(self, conversion_mode=1): """ Return a dictionary (indexed by Segment) of dictionaries (indexed by RecordingChannel) of :class:`neo.core.AnalogSignal` lists. If analog signals not attached to a Segment or RecordingChannel are selected, their dictionary key will be ``DataProvider.no_segment`` or ``DataProvider.no_channel``, respectively. :param int conversion_mode: Determines what signals are returned: 1. AnalogSignal objects only 2. AnalogSignal objects extracted from AnalogSignalArrays only 3. 
Both AnalogSignal objects and extracted AnalogSignalArrays """ return self._invert_indices( self.analog_signals_by_channel_and_segment(conversion_mode)) def analog_signal_arrays(self): """ Return a list of :class:`neo.core.AnalogSignalArray` objects. """ return [] def analog_signal_arrays_by_segment(self): """ Return a dictionary (indexed by Segment) of lists of :class:`neo.core.AnalogSignalArray` objects. If analog signals arrays not attached to a Segment are selected, their dictionary key will be ``DataProvider.no_segment``. """ return {} def analog_signal_arrays_by_channelgroup(self): """ Return a dictionary (indexed by RecordingChannelGroup) of lists of :class:`neo.core.AnalogSignalArray` objects. If analog signals arrays not attached to a RecordingChannel are selected, their dictionary key will be ``DataProvider.no_channelgroup``. """ return {} def analog_signal_arrays_by_channelgroup_and_segment(self): """ Return a dictionary (indexed by RecordingChannelGroup) of dictionaries (indexed by Segment) of :class:`neo.core.AnalogSignalArray` objects. If there are multiple analog signals in one RecordingChannel for the same Segment, only the first will be contained in the returned dictionary. If analog signal arrays not attached to a Segment or RecordingChannelGroup are selected, their dictionary key will be ``DataProvider.no_segment`` or ``DataProvider.no_channelgroup``, respectively. """ return {} def analog_signal_arrays_by_segment_and_channelgroup(self): """ Return a dictionary (indexed by RecordingChannelGroup) of dictionaries (indexed by Segment) of :class:`neo.core.AnalogSignalArray` objects. If there are multiple analog signals in one RecordingChannel for the same Segment, only the first will be contained in the returned dictionary. If analog signal arrays not attached to a Segment or RecordingChannelGroup are selected, their dictionary key will be ``DataProvider.no_segment`` or ``DataProvider.no_channelgroup``, respectively. 
""" return self._invert_indices( self.analog_signal_arrays_by_channelgroup_and_segment()) def refresh_view(self): """ Refresh associated views of the data. Use this method if when you change the neo hierarchy on which the selection is based (e.g. adding or removing objects). It will ensure that all current views on the data are updated, for example in Spyke Viewer. """ pass def data_dict(self): """ Return a dictionary with all information to serialize the object. """ return {} @classmethod def from_data(cls, data, progress=None): """ Create a new `DataProvider` object from a dictionary. This method is mostly for internal use. The respective type of `DataProvider` (e.g. :class:`spykeviewer.plugin_framework.data_provider_neo.DataProviderNeo` has to be imported in the environment where this function is called. :param dict data: A dictionary containing data from a `DataProvider` object, as returned by :func:`data_dict`. :param ProgressIndicator progress: The object where loading progress will be indicated. 
""" if progress: return cls._factories[data['type']](data, progress) return cls._factories[data['type']](data)spykeutils-0.4.3/spykeutils/plugin/data_provider_stored.py0000644000175000017500000000567512664623646022403 0ustar robrobimport json from data_provider import DataProvider from data_provider_neo import NeoDataProvider from ..progress_indicator import ProgressIndicator class NeoStoredProvider(NeoDataProvider): def __init__(self, data, progress=ProgressIndicator()): super(NeoStoredProvider, self).__init__(data['name'], progress) self.data = data self.block_cache = None @classmethod def from_current_selection(cls, name, viewer): """ Create new NeoStoredProvider from current viewer selection """ data = cls._get_data_from_viewer(viewer) data['name'] = name return cls(data, viewer.progress) @classmethod def from_file(cls, file_name, progress=ProgressIndicator()): """ Create new DBStoredProvider from JSON file """ data = json.load(file_name) return cls(data, progress) def save(self, file_name): """ Save selection to JSON file """ f = open(file_name, 'w') json.dump(self.data, f, sort_keys=True, indent=4) f.close() def data_dict(self): """ Return a dictionary with all information to serialize the object """ self.data['name'] = self.name return self.data def blocks(self): """ Return a list of selected Block objects """ if self.block_cache is None: self.block_cache = [] for b in self.data['blocks']: cl = None rp = None if len(b) > 2: cl = self.find_io_class(b[2]) if len(b) > 3: rp = b[3] self.block_cache.append(NeoDataProvider.get_block( b[1], b[0], force_io=cl, read_params=rp)) return self.block_cache def segments(self): """ Return a list of selected Segment objects """ blocks = self.blocks() segments = [] for s in self.data['segments']: segments.append(blocks[s[1]].segments[s[0]]) return segments def recording_channel_groups(self): """ Return a list of selected """ blocks = self.blocks() rcgs = [] for rcg in self.data['channel_groups']: 
rcgs.append(blocks[rcg[1]].recordingchannelgroups[rcg[0]]) return rcgs def recording_channels(self): """ Return a list of selected recording channel indices """ rcgs = self.recording_channel_groups() rcs = [] for rc in self.data['channels']: rcs.append(rcgs[rc[1]].recordingchannels[rc[0]]) return rcs def units(self): """ Return a list of selected Unit objects """ rcgs = self.recording_channel_groups() units = [] for u in self.data['units']: units.append(rcgs[u[1]].units[u[0]]) return units # Enable automatic creation of NeoStoredProvider objects DataProvider._factories['Neo'] = NeoStoredProviderspykeutils-0.4.3/spykeutils/plugin/startplugin.py0000644000175000017500000001341112664623646020537 0ustar robrob#! /usr/bin/env python import sys import os import argparse import inspect import json import pickle from spykeutils.plugin.analysis_plugin import AnalysisPlugin from spykeutils.plugin.data_provider import DataProvider from spykeutils.plugin import io_plugin from spykeutils import progress_indicator # Data provider implementations need to be imported so they can be loaded import spykeutils.plugin.data_provider_stored try: from spykeutils.plot.helper import _needs_qt, ProgressIndicatorDialog from PyQt4.QtGui import QApplication from PyQt4.QtCore import QTimer # Prepare matplotlib import matplotlib matplotlib.use('Qt4Agg') import matplotlib.pyplot matplotlib.pyplot.ion() # Prepare application and progress bar app = QApplication([]) progress = ProgressIndicatorDialog(None) has_qt = True except ImportError: has_qt = False try: # Use command line progressbar if package is available import progressbar class ProgressIndicatorConsole(progress_indicator.ProgressIndicator): """ Implements a progress indicator for the CLI """ def __init__(self): widgets = ['', progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()] self.bar = progressbar.ProgressBar(widgets=widgets) self.tick = 0 self.maxval = 0 def set_ticks(self, ticks): """ Set the required number of 
def main():
    """ Entry point of the plugin starter script.

    Parses the command line, executes the plugin code, deserializes the
    selections and runs the plugin with them. Exits with status 1 if the
    plugin class or the selection cannot be loaded; returns 0 otherwise.
    """
    parser = argparse.ArgumentParser(description='Start an analysis plugin')
    parser.add_argument('Name', type=str, help='Name of analysis class')
    parser.add_argument('Code', type=str, help='Code of the analysis')
    parser.add_argument('Selection', type=str, help='Serialized selection')
    parser.add_argument(
        '-c', '--config', dest='config', type=str,
        help='Pickled configuration of analysis')
    parser.add_argument(
        '-cf', '--codefile', dest='codefile', action='store_const',
        const=True, default=False, help='Code represents a filename containing '
        'code (default: Code is a string containing code')
    parser.add_argument(
        '-sf', '--selectionfile', dest='selectionfile', action='store_const',
        const=True, default=False,
        help='Selection represents a filename containing '
        'the serialized selection (default: Selection is a string')
    parser.add_argument(
        '-dd', '--datadir', type=str, help='The data directory')
    parser.add_argument(
        '-io', type=str, default=[], nargs='+', help='IO plugin file paths')

    # parse_known_args() so that unrecognized options only produce a
    # warning instead of aborting the run.
    parsed = parser.parse_known_args()
    args = parsed[0]
    if parsed[1]:
        print >> sys.stderr, ('Warning: the following command options are '
                              'invalid and were ignored:'), parsed[1]

    # Execute the plugin code in a fresh namespace so the classes it
    # defines can be discovered below.
    # NOTE(review): exec/execfile on command-line supplied code is only
    # safe when the caller is trusted.
    exc_globals = {}
    if args.codefile:
        execfile(args.Code, exc_globals)
    else:
        exec(args.Code, exc_globals)

    # Load plugin: find the AnalysisPlugin subclass with the requested
    # name among the objects the executed code defined.
    plugin = None
    for cl in exc_globals.values():
        if not inspect.isclass(cl):
            continue
        if not issubclass(cl, AnalysisPlugin):
            continue
        if not cl.__name__ == args.Name:
            continue
        plugin = cl()
        break

    # Load IO plugins
    for io in args.io:
        io_plugin.load_from_file(io)

    if not plugin:
        sys.stderr.write('Could not find plugin class, aborting...\n')
        sys.exit(1)

    # Load configuration
    # NOTE(review): pickle.loads on command-line data has the same trust
    # requirement as the exec above.
    if args.config:
        plugin.set_parameters(pickle.loads(args.config))

    # Load selection (either inline JSON or a file containing it)
    try:
        if args.selectionfile:
            f = open(args.Selection, 'r')
            sel_string = '\n'.join(f.readlines())
        else:
            sel_string = args.Selection
        sels = json.loads(sel_string)
    except Exception:
        sys.stderr.write('Could not load selection, aborting...\n')
        sys.exit(1)

    selections = []
    for s in sels:
        # DataProvider.from_data() dispatches on the serialized 'type' key.
        selection = DataProvider.from_data(s)
        selection.progress = progress
        selections.append(selection)

    if args.datadir and os.path.isdir(args.datadir):
        AnalysisPlugin.data_dir = args.datadir

    try:
        # The first selection is the current one; the rest are passed as
        # additional selections.
        plugin.start(selections[0], selections[1:])
    except progress_indicator.CancelException:
        print 'User canceled.'
    finally:
        progress.done()

    if has_qt:
        # Quit event loop if the plugin has not created a Qt Window
        if app.topLevelWidgets() == [progress]:
            app.exit(0)
    return 0
class TestScipyMaximum(ut.TestCase):
    """ Tests for the quantity-aware replacement of scipy's maximum(). """

    def test_works_with_normal_arrays(self):
        a = sp.array([[4, 2], [2, 4]])
        b = sp.array([[3, 3], [3, 3]])
        expected = sp.array([[4, 3], [3, 4]])
        actual = spq.maximum(a, b)
        assert_array_equal(expected, actual)

    def test_works_with_quantity_arrays(self):
        # Units differ (s vs. ms); operands have to be rescaled before
        # comparison, in either argument order.
        a = sp.array([[4, 2], [2, 4]]) * pq.s
        b = sp.array([[3000, 3000], [3000, 3000]]) * pq.ms
        expected = sp.array([[4, 3], [3, 4]]) * pq.s
        actual = spq.maximum(a, b)
        assert_array_almost_equal(expected, actual)
        actual = spq.maximum(b, a)
        assert_array_almost_equal(expected, actual)

    def test_works_with_normal_and_quantity_arrays_mixed(self):
        a = sp.array([[4, 2], [2, 4]])
        b = sp.array([[3, 3], [3, 3]]) * pq.dimensionless
        expected = sp.array([[4, 3], [3, 4]]) * pq.dimensionless
        actual = spq.maximum(a, b)
        assert_array_almost_equal(expected, actual)
        actual = spq.maximum(b, a)
        assert_array_almost_equal(expected, actual)

    def test_works_with_dimensionless_quantity_arrays(self):
        # Renamed from the truncated ``test_works_with_``: both operands
        # are dimensionless quantity arrays here.
        a = sp.array([[4, 2], [2, 4]]) * pq.dimensionless
        b = sp.array([[3, 3], [3, 3]]) * pq.dimensionless
        expected = sp.array([[4, 3], [3, 4]]) * pq.dimensionless
        actual = spq.maximum(a, b)
        assert_array_almost_equal(expected, actual)
        actual = spq.maximum(b, a)
        assert_array_almost_equal(expected, actual)

    def test_uses_out_param(self):
        a = sp.array([[4, 2], [2, 4]]) * pq.s
        b = sp.array([[3000, 3000], [3000, 3000]]) * pq.ms
        expected = sp.array([[4, 3], [3, 4]]) * pq.s
        out = sp.zeros_like(expected)
        assert out.units == pq.s
        # The result has to be written into the provided output array.
        spq.maximum(a, b, out)
        assert_array_almost_equal(expected, out)
        spq.maximum(b, a, out)
        assert_array_almost_equal(expected, out)
class TestScipyConcatenate(ut.TestCase):
    """ Tests for the quantity-aware replacement of scipy's concatenate(). """

    def test_works_with_normal_arrays(self):
        parts = (sp.array([[1]]), sp.array([[2]]), sp.array([[3]]))
        joined = spq.concatenate(parts, axis=1)
        assert_array_equal(sp.array([[1, 2, 3]]), joined)

    def test_works_with_quantity_arrays(self):
        # Mixed units (s and ms) have to be rescaled before joining.
        parts = (sp.array([[1]]) * pq.s,
                 sp.array([[2000]]) * pq.ms,
                 sp.array([[3]]) * pq.s)
        joined = spq.concatenate(parts, axis=1)
        assert_array_equal(sp.array([[1, 2, 3]]) * pq.s, joined)

    def test_works_with_normal_and_quantity_arrays_mixed(self):
        parts = (sp.array([[1]]),
                 sp.array([[2]]) * pq.dimensionless,
                 sp.array([[3]]))
        joined = spq.concatenate(parts, axis=1)
        assert_array_equal(sp.array([[1, 2, 3]]), joined)

    def test_raises_exception_if_mixing_incompatible_units(self):
        plain = sp.array([[1]])
        dimless = sp.array([[2]]) * pq.dimensionless
        seconds = sp.array([[3]]) * pq.s
        meters = sp.array([[4]]) * pq.m
        # Every pairing of incompatible units must fail in either order.
        for group in ((plain, seconds, meters), (dimless, seconds, meters)):
            for pair in combinations(group, 2):
                with self.assertRaises(Exception):
                    spq.concatenate(pair)
                with self.assertRaises(Exception):
                    spq.concatenate(pair[::-1])
def arange_spikes(t_start, t_stop=None, t_step=1.0 * pq.s):
    """ Arranges equally spaced spikes in a spike train over an interval.
    There will be no spikes at the interval boundaries.

    :param t_start: The start point of the interval if `t_stop` is not
        `None`. Otherwise, it will be the end point of the interval and
        the start point will be set to 0s.
    :type t_start: Quantity scalar
    :param t_stop: The end point of the interval.
    :type t_stop: Quantity scalar
    :param t_step: Spacing between the spikes.
    :type t_step: Quantity scalar
    :returns: The arranged spike train.
    :rtype: :class:`neo.SpikeTrain`
    """
    if t_stop is None:
        # Single-argument form: the interval is [0s, t_start].
        t_stop = t_start
        t_start = 0.0 * pq.s
    # Use rescale() (which returns a copy) instead of assigning to
    # ``.units`` so the quantities passed in by the caller are not
    # modified in place.
    t_start = t_start.rescale(t_step.units)
    t_stop = t_stop.rescale(t_step.units)
    spike_times = sp.arange(t_start + t_step, t_stop, t_step) * t_step.units
    return neo.SpikeTrain(spike_times, t_start=t_start, t_stop=t_stop)
class CommonSimilarityTestCases(object):
    """ Mixin with test cases that every spike train similarity measure
    should pass.
    """

    def calc_similarity(self, trains):
        """ Calculates and returns the similarity measure under test.

        :param SpikeTrain a:
        :param SpikeTrain b:
        :rtype: float
        """
        raise NotImplementedError()

    def test_returns_one_for_equal_spike_trains(self):
        spikes = sp.array([
            1.1844519, 1.57346687, 2.52261998, 3.65824785, 5.38988771,
            5.63178278, 6.70500182, 7.99562401, 9.21135176]) * pq.s
        train = neo.SpikeTrain(spikes, t_stop=10.0 * pq.s)
        # A train compared with a copy of itself has similarity 1
        # everywhere in the resulting 2x2 matrix.
        result = self.calc_similarity([train, train.copy()])
        assert_array_almost_equal(sp.ones((2, 2)), result)

    def test_is_symmetric(self):
        x = neo.SpikeTrain(sp.array([
            1.1844519, 1.57346687, 2.52261998, 3.65824785, 5.38988771,
            5.63178278, 6.70500182, 7.99562401, 9.21135176]) * pq.s,
            t_stop=10.0 * pq.s)
        y = neo.SpikeTrain(sp.array([
            0.86096077, 3.54273148, 4.20476326, 6.02451599, 6.42851683,
            6.5564268, 7.07864592, 7.2368936, 7.31784319, 8.15148958,
            8.53540889]) * pq.s, t_stop=10.0 * pq.s)
        # The argument order must not change the result.
        assert_array_almost_equal(
            self.calc_similarity([x, y]), self.calc_similarity([y, x]))
class Test_event_synchronization(ut.TestCase, CommonSimilarityTestCases):
    """ Tests for the event synchronization similarity measure. Inherits
    the generic similarity test cases from CommonSimilarityTestCases. """

    def calc_similarity(self, trains):
        # Hook for the inherited common test cases.
        return stm.event_synchronization(trains)

    def test_returns_correct_event_synchronization(self):
        a = neo.SpikeTrain(sp.array([1.0, 2.5, 6.5]) * pq.s, t_stop=7.0 * pq.s)
        b = neo.SpikeTrain(sp.array([5.7, 1.0]) * pq.s, t_stop=10.0 * pq.s)
        c = neo.SpikeTrain(sp.array([2.0, 2.1, 5.0]) * pq.s, t_stop=10.0 * pq.s)
        # Symmetric 3x3 matrix with unit diagonal (self-similarity).
        expected = sp.array(
            [[1.0, 0.81649658092772615, 0.0],
             [0.81649658092772615, 1.0, 0.4082482904638631],
             [0.0, 0.4082482904638631, 1.0]])
        actual = stm.event_synchronization([a, b, c])
        assert_array_almost_equal(expected, actual)

    def test_allows_to_set_constant_tau(self):
        a = neo.SpikeTrain(sp.array([1.0, 2.5, 6.5]) * pq.s, t_stop=7.0 * pq.s)
        b = neo.SpikeTrain(sp.array([1.0, 5.7]) * pq.s, t_stop=10.0 * pq.s)
        # Fixed coincidence window instead of the adaptive default.
        tau = 0.5 * pq.s
        expected = sp.array(
            [[1.0, 0.40824829046386307], [0.40824829046386307, 1.0]])
        actual = stm.event_synchronization([a, b], tau)
        assert_array_almost_equal(expected, actual)

    def test_allows_use_of_different_kernel(self):
        a = neo.SpikeTrain(sp.array([1.0, 2.5, 6.5]) * pq.s, t_stop=7.0 * pq.s)
        b = neo.SpikeTrain(sp.array([1.0, 5.7]) * pq.s, t_stop=10.0 * pq.s)
        tau = 1.0 * pq.s
        # Non-normalized Laplacian kernel replaces the default coincidence
        # detection.
        kernel = sigproc.LaplacianKernel(1.0, normalize=False)
        expected = sp.array(
            [[1.0, 0.70480122722318095], [0.70480122722318095, 1.0]])
        actual = stm.event_synchronization([a, b], tau, kernel=kernel)
        assert_array_almost_equal(expected, actual)
neo.SpikeTrain(sp.array([ 1.1844519, 1.57346687, 2.52261998, 3.65824785, 5.38988771, 5.63178278, 6.70500182, 7.99562401, 9.21135176 ]) * pq.s, t_stop=10.0 * pq.s, sampling_rate=100 * pq.Hz) f = sigproc.GaussianKernel() expected = sp.zeros((2, 2)) * pq.Hz ** 0.5 assert_array_almost_equal(expected, stm.norm_dist( [st, st.copy()], f, 1 * pq.Hz)) def test_returns_norm_if_one_spike_train_is_empty(self): empty = create_empty_spike_train() non_empty = neo.SpikeTrain(sp.array([1.0]) * pq.s, t_stop=2.0 * pq.s) sampling_rate = 100 * pq.Hz smoothing_filter = sigproc.GaussianKernel() norm = stm.st_norm( non_empty, smoothing_filter, sampling_rate=sampling_rate) expected = sp.array([[0.0, norm], [norm, 0.0]]) * pq.Hz ** 0.5 actual = stm.norm_dist( [empty, non_empty], smoothing_filter, sampling_rate=sampling_rate) assert_array_almost_equal(expected, actual, decimal=3) def test_returns_correct_spike_train_norm_distance(self): a = neo.SpikeTrain( sp.array([1.0]) * pq.s, t_start=0.6 * pq.s, t_stop=1.4 * pq.s) b = neo.SpikeTrain(sp.array([0.5, 1.5]) * pq.s, t_stop=2.0 * pq.s) c = neo.SpikeTrain(sp.array([1.0, 2.0]) * pq.s, t_stop=3.0 * pq.s) smoothing_filter = sigproc.GaussianKernel(1.0 * pq.s) expected = sp.array( [[0.0, 0.475035, 0.531116], [0.475035, 0.0, 0.309422], [0.531116, 0.309422, 0.0]]) * pq.Hz ** 0.5 actual = stm.norm_dist( [a, b, c], smoothing_filter, sampling_rate=200 * pq.Hz) assert_array_almost_equal( expected, actual.rescale(expected.units), decimal=3) def test_is_symmetric(self): a = neo.SpikeTrain(sp.array([ 1.1844519, 1.57346687, 2.52261998, 3.65824785, 5.38988771, 5.63178278, 6.70500182, 7.99562401, 9.21135176 ]) * pq.s, t_stop=10.0 * pq.s) b = neo.SpikeTrain(sp.array([ 0.86096077, 3.54273148, 4.20476326, 6.02451599, 6.42851683, 6.5564268, 7.07864592, 7.2368936, 7.31784319, 8.15148958, 8.53540889 ]) * pq.s, t_stop=10.0 * pq.s) f = sigproc.GaussianKernel() sampling_rate = 350 * pq.Hz assert_array_almost_equal( stm.norm_dist([a, b], f, sampling_rate=sampling_rate), 
class Test_st_inner(ut.TestCase):
    """ Tests for the inner product of smoothed spike trains. """

    def test_returns_zero_if_any_spike_train_is_empty(self):
        empty = create_empty_spike_train()
        non_empty = neo.SpikeTrain(sp.array([1.0]) * pq.s, t_stop=2.0 * pq.s)
        smoothing_filter = sigproc.GaussianKernel()
        sampling_rate = 1 * pq.Hz
        expected = sp.array([0.0]) * pq.Hz
        # The inner product with an empty train is zero regardless of
        # which operand is empty.
        self.assertAlmostEqual(
            expected, stm.st_inner(
                [empty], [empty], smoothing_filter, sampling_rate))
        self.assertAlmostEqual(
            expected, stm.st_inner(
                [empty], [non_empty], smoothing_filter, sampling_rate))
        self.assertAlmostEqual(
            expected, stm.st_inner(
                [non_empty], [empty], smoothing_filter, sampling_rate))

    def test_returns_correct_inner_spike_train_product(self):
        a = neo.SpikeTrain(
            sp.array([1.0]) * pq.s, t_start=0.6 * pq.s, t_stop=1.4 * pq.s)
        b = neo.SpikeTrain(
            sp.array([0.5, 1.5]) * pq.s, t_stop=2.0 * pq.s)
        smoothing_filter = sigproc.GaussianKernel(1.0 * pq.s)
        expected = 0.530007 * pq.Hz
        actual = stm.st_inner(
            [a], [b], smoothing_filter, sampling_rate=1000 * pq.Hz)
        # Compare in the expected value's units; low precision because of
        # the discretization by the sampling rate.
        self.assertAlmostEqual(
            expected, actual.rescale(expected.units), places=3)

    def test_is_symmetric(self):
        a = neo.SpikeTrain(sp.array([
            1.1844519, 1.57346687, 2.52261998, 3.65824785, 5.38988771,
            5.63178278, 6.70500182, 7.99562401, 9.21135176
        ]) * pq.s, t_stop=10.0 * pq.s)
        b = neo.SpikeTrain(sp.array([
            0.86096077, 3.54273148, 4.20476326, 6.02451599, 6.42851683,
            6.5564268, 7.07864592, 7.2368936, 7.31784319, 8.15148958,
            8.53540889
        ]) * pq.s, t_stop=10.0 * pq.s)
        f = sigproc.GaussianKernel()
        sampling_rate = 150 * pq.Hz
        # Swapping the operands must not change the result.
        assert_array_almost_equal(
            stm.st_inner([a], [b], f, sampling_rate=sampling_rate),
            stm.st_inner([b], [a], f, sampling_rate=sampling_rate),
            decimal=3)

    def test_accepts_sequences_of_spike_trains(self):
        # Note: a uses ms, b uses s; units have to be reconciled.
        a = neo.SpikeTrain(
            sp.array([1000.0]) * pq.ms, t_start=0.6 * pq.s, t_stop=1.4 * pq.s)
        b = neo.SpikeTrain(
            sp.array([0.5, 1.5]) * pq.s, t_stop=2.0 * pq.s)
        f = sigproc.GaussianKernel()
        sampling_rate = 150 * pq.Hz
        # Full 2x2 Gram matrix of pairwise inner products.
        expected = sp.array(
            [[0.282094, 0.530072], [0.530072, 1.003787]]) * pq.Hz
        actual = stm.st_inner([a, b], [a, b], f, sampling_rate=sampling_rate)
        assert_array_almost_equal(expected, actual, decimal=3)
class Test_van_rossum_multiunit_dist(ut.TestCase, CommonMetricTestCases):
    """ Tests for the multi-unit van Rossum distance. """

    # With only one spike train each we should get the normal van Rossum
    # distance.
    def calc_metric(self, trains):
        # Hook for the inherited common metric test cases: wrap each train
        # list as a single unit with weighting 1.
        return stm.van_rossum_multiunit_dist({0: trains}, 1)

    def test_returns_correct_distance_for_multiunits(self):
        a0 = neo.SpikeTrain(sp.array([1.0, 5.0, 7.0]) * pq.s, t_stop=8.0 * pq.s)
        a1 = neo.SpikeTrain(sp.array([1.0, 2.0, 5.0]) * pq.s, t_stop=8.0 * pq.s)
        b0 = neo.SpikeTrain(sp.array([2.0, 4.0, 5.0]) * pq.s, t_stop=8.0 * pq.s)
        b1 = neo.SpikeTrain(sp.array([3.0, 8.0]) * pq.s, t_stop=9.0 * pq.s)
        # Two multi-unit observations, each consisting of two units.
        units = {0: [a0, a1], 1: [b0, b1]}
        weighting = 0.3
        expected = sp.array([[0.0, 2.37006181], [2.37006181, 0.0]])
        actual = stm.van_rossum_multiunit_dist(units, weighting)
        assert_array_almost_equal(expected, actual)

    def test_allows_tau_equal_to_infinity_with_multiunits(self):
        a0 = neo.SpikeTrain(sp.array([1.0, 5.0, 7.0]) * pq.s, t_stop=8.0 * pq.s)
        a1 = neo.SpikeTrain(sp.array([5.0]) * pq.s, t_stop=8.0 * pq.s)
        b0 = neo.SpikeTrain(sp.array([2.0, 4.0, 5.0]) * pq.s, t_stop=8.0 * pq.s)
        b1 = neo.SpikeTrain(sp.array([3.0, 8.0]) * pq.s, t_stop=9.0 * pq.s)
        units = {0: [a0, a1], 1: [b0, b1]}
        weighting = 0.3
        # tau = inf reduces the distance to a rate-difference measure; the
        # expected value below follows from the spike count differences.
        tau = sp.inf * pq.s
        dist = sp.sqrt(5.0 + weighting * 4.0)
        expected = sp.array([[0.0, dist], [dist, 0.0]])
        actual = stm.van_rossum_multiunit_dist(units, weighting, tau)
        assert_array_almost_equal(expected, actual)
t_stop=2 * pq.s) q = 2.0 / pq.s expected = sp.array([[0.0, 0.5 * 2.0], [0.5 * 2.0, 0.0]]) assert_array_almost_equal(expected, stm.victor_purpura_dist([a, b], q)) def test_returns_two_for_distant_spike_pair(self): a = neo.SpikeTrain(sp.array([1.0]) * pq.s, t_stop=6 * pq.s) b = neo.SpikeTrain(sp.array([5.0]) * pq.s, t_stop=6 * pq.s) q = 1.0 / pq.s expected = sp.array([[0.0, 2.0], [2.0, 0.0]]) assert_array_almost_equal(expected, stm.victor_purpura_dist([a, b], q)) def test_returns_correct_distance_for_two_spike_trains(self): q = 1.0 / pq.s a = neo.SpikeTrain( sp.array([1.0, 2.0, 4.1, 7.0, 7.1]) * pq.s, t_stop=8.0 * pq.s) b = neo.SpikeTrain( sp.array([1.2, 4.0, 4.3]) * pq.s, t_stop=8.0 * pq.s) # From a to b: # - shift 1.0 to 1.2 (cost 0.2) # - delete 2.0 (cost 1.0) # - shift 4.1 to 4.0 (cost 0.1) # - insert 4.3 (cost 1.0) # - delete 7.0 (cost 1.0) # - delete 7.1 (cost 1.0) expected = sp.array([[0.0, 4.3], [4.3, 0.0]]) assert_array_almost_equal(expected, stm.victor_purpura_dist([a, b], q)) def test_returns_correct_distance_for_complex_spike_trains(self): # This is a regression test for a bug that certain spike trains where # producing to large distances. 
trains = [ neo.SpikeTrain(sp.array([ 0.02675798, 0.03181146, 0.03341735, 0.03775562, 0.07791623, 0.08822388, 0.10770132, 0.12325048, 0.16989942, 0.2017788, 0.20671708, 0.21338806, 0.24211925, 0.25483266, 0.27496442, 0.27587779, 0.27987714, 0.29092447, 0.3126856, 0.31699044, 0.33125793, 0.38880785, 0.38881775, 0.44730422, 0.47123718, 0.47530894, 0.50035773, 0.5110994, 0.5406418, 0.55932289, 0.56299461, 0.61127646, 0.6669967, 0.6878365, 0.69095517, 0.71292938, 0.74403481, 0.79566084, 0.80520382, 0.87465267, 0.9233359, 0.97119188, 0.97221954, 0.98573419, 1.00598374, 1.08840599, 1.10346633, 1.11300801, 1.11736787, 1.17619865, 1.17750093, 1.18119904, 1.19001107, 1.23349135, 1.24515837, 1.31601168, 1.32058585, 1.3274779, 1.3304611, 1.39192936, 1.396939, 1.42214471, 1.43682422, 1.44056841, 1.44614004, 1.45396973, 1.48746414, 1.51381587, 1.52504075, 1.56534678, 1.56654466, 1.56932347, 1.62405807, 1.63243667, 1.64011958, 1.65348796, 1.67166925, 1.6899014, 1.70019229, 1.71489787, 1.7498802, 1.75506253, 1.77316786, 1.79721912, 1.80556803, 1.82943579, 1.8393378, 1.85571875, 1.86451301, 1.86915057, 1.93494862, 1.95227868, 1.95787129, 2.01151238, 2.05108779, 2.05622847, 2.07756536, 2.09751716, 2.11014462, 2.12756709, 2.1301002, 2.22850943, 2.23546736, 2.26357638, 2.32916089, 2.35222596, 2.36019072, 2.44110203, 2.48733729, 2.48868378, 2.49587805, 2.50484364, 2.52888902, 2.54460952, 2.55477246, 2.56718557, 2.57197204, 2.58715912, 2.62834212, 2.6607554, 2.71456005, 2.71990732, 2.73476721, 2.76560221, 2.79578411, 2.81420671, 2.82842414, 2.84323564, 2.86808335, 2.89346033, 2.89759722, 2.90250757, 2.92396906, 2.96089258, 2.99343156, 2.99513297, 3.00295214, 3.00404354, 3.01155098, 3.0220984, 3.06860675, 3.10776003, 3.11125211, 3.12200107, 3.13822244, 3.172325, 3.17359243, 3.17693368, 3.18779785, 3.1898421, 3.2027296, 3.20308197, 3.22950711, 3.23480067, 3.25230996, 3.26324005, 3.30303045, 3.3323502, 3.34200826, 3.38362587, 3.39374602, 3.40100303, 3.42572902, 3.43405842, 3.48714745, 
3.48808569, 3.50765539, 3.51019425, 3.51654164, 3.53508831, 3.55091076, 3.55806575, 3.56160866, 3.64616879, 3.66336828, 3.70684962, 3.71508041, 3.71960502, 3.74088608, 3.7694215, 3.78687648, 3.78826898, 3.8032681, 3.80442445, 3.82610046, 3.83252045, 3.83375399, 3.83963007, 3.87070708, 3.89921058, 3.91373461, 3.98189025, 3.99281868, 3.99615101, 4.03866165, 4.06296107, 4.0664576, 4.10008341, 4.13249147, 4.14948245, 4.15544816, 4.18645968, 4.23956819, 4.24159763, 4.25682634, 4.29096996, 4.29801235, 4.30704865, 4.3364981, 4.34955189, 4.35691426, 4.37946289, 4.40449102, 4.41415224, 4.42969554, 4.43297123, 4.43672311, 4.46269914, 4.50611436, 4.54325245, 4.59867291, 4.6118659, 4.62881441, 4.64220816, 4.68635809, 4.6919799, 4.69224906, 4.71150593, 4.75981344, 4.76055566, 4.8129406, 4.83692968, 4.87459801, 4.8895905, 4.89048346, 4.90390866, 4.92131202, 4.95374717, 4.95658542, 4.9779478, 4.99927772, 5.00321623, 5.02279036, 5.02980636, 5.06479496, 5.07471904, 5.09194692, 5.1019829, 5.10353541, 5.10481109, 5.10639536, 5.13999128, 5.1501336, 5.15279668, 5.16864755, 5.18043833, 5.18738265, 5.19297201, 5.19496814, 5.19888859, 5.20398454, 5.24268102, 5.25293838, 5.25735102, 5.27904209, 5.32513061, 5.33412059, 5.35300406, 5.36359518, 5.38220169, 5.41380451, 5.44608516, 5.45664259, 5.46624451, 5.49995728, 5.52030155, 5.52986433, 5.53527111, 5.58813843, 5.5986904, 5.63867497, 5.64965832, 5.70854657, 5.77092465, 5.78018575, 5.80469618, 5.82611303, 5.84211921, 5.84769114, 5.85898366, 5.86174668, 5.86686434, 5.86807339, 5.88557362, 5.93531383, 5.94590946, 5.9535614, 5.96181496, 5.96211509, 5.96322495, 5.99951691, 6.02956462, 6.03071066, 6.11325118, 6.12068097, 6.13916618, 6.15618799, 6.17405661, 6.19074313, 6.20637448, 6.21144991, 6.22694995, 6.2504859, 6.29414487, 6.3132762, 6.37532399, 6.37625784, 6.41398007, 6.41816266, 6.42386713, 6.42767342, 6.43909112, 6.48312163, 6.50112821, 6.50284644, 6.52335736, 6.55053573, 6.55945474, 6.56113336, 6.58452909, 6.58510608, 6.59753607, 
6.61954437, 6.64973018, 6.66495931, 6.66904812, 6.67276565, 6.73951848, 6.75443413, 6.75483586, 6.79528155, 6.81670372, 6.83292695, 6.84892368, 6.90221611, 6.94186031, 6.97372169, 6.98930105, 7.00503816, 7.01156979, 7.01622253, 7.04066381, 7.08116801, 7.1022431, 7.10534942, 7.12276162, 7.17072979, 7.1846351, 7.21250037, 7.23569895, 7.23759221, 7.26638189, 7.31573003, 7.39632157, 7.40696688, 7.42971144, 7.45062847, 7.4634739, 7.4718392, 7.49271328, 7.55204862, 7.59257437, 7.60799196, 7.61363934, 7.62867287, 7.64457945, 7.65194936, 7.66110909, 7.66676376, 7.67758238, 7.68405278, 7.69391715, 7.6990212, 7.72407479, 7.75592843, 7.77321337, 7.78914379, 7.80573035, 7.81001852, 7.81201576, 7.81761754, 7.822486, 7.88454532, 7.90159693, 7.92447452, 7.93032758, 7.95127432, 7.95471672, 7.95611181, 7.99765534, 8.00169997, 8.05611102, 8.06999799, 8.0877689, 8.11370158, 8.12326905, 8.19558094, 8.20785861, 8.22790536, 8.25096989, 8.29404755, 8.32625888, 8.38768653, 8.41293726, 8.44072146, 8.45655928, 8.46028366, 8.46062243, 8.47631889, 8.50685359, 8.539859, 8.55656747, 8.57298557, 8.60573667, 8.65462893, 8.67784071, 8.68571095, 8.71909035, 8.72206184, 8.7314385, 8.73608901, 8.74239948, 8.74416149, 8.75145957, 8.77516598, 8.88377333, 8.8848043, 8.89789711, 8.91243437, 8.91476806, 8.91492797, 8.92139551, 8.93704381, 8.96318634, 8.99623903, 9.00131449, 9.01417633, 9.01421952, 9.03203569, 9.03786051, 9.04157583, 9.09361684, 9.09610771, 9.10131371, 9.10609705, 9.12084572, 9.15575811, 9.15644013, 9.1691256, 9.18362837, 9.18595479, 9.21164258, 9.24095542, 9.24290778, 9.25767234, 9.26005027, 9.26048416, 9.28017441, 9.29182669, 9.30192562, 9.31486222, 9.35580549, 9.37514957, 9.43470264, 9.46401276, 9.48844607, 9.4945491, 9.50132042, 9.5133463, 9.51426077, 9.52668188, 9.52888838, 9.53854506, 9.54400945, 9.55057675, 9.57993589, 9.63604947, 9.64316243, 9.66791914, 9.70282942, 9.71906419, 9.72696098, 9.7422066, 9.74416635, 9.76302569, 9.77237119, 9.77808876, 9.78865054, 9.79208195, 9.82398648, 
9.83977829, 9.85440184, 9.87001817, 9.91401035, 9.92363489, 9.9347058, 9.94121602, 9.95317336, 9.95549832, 9.95695226, 9.97754868, 9.98384015]) * pq.s, t_stop=10.0 * pq.s), neo.SpikeTrain(sp.array([ 0.0114491, 0.02651815, 0.02672949, 0.02712123, 0.03514833, 0.05414386, 0.07570339, 0.09427385, 0.10903071, 0.11588711, 0.11739125, 0.1285715, 0.14934368, 0.16684372, 0.21166201, 0.22235881, 0.23386214, 0.24181703, 0.25805984, 0.2654033, 0.27348522, 0.30116999, 0.31207604, 0.31553495, 0.32936142, 0.32953416, 0.35437639, 0.40074384, 0.41165687, 0.44861386, 0.49870305, 0.5265349, 0.53879183, 0.57395557, 0.62112778, 0.63952386, 0.65174804, 0.68523672, 0.72572932, 0.74491922, 0.77281653, 0.77533443, 0.83372669, 0.84671895, 0.87093241, 0.92558636, 0.94601541, 0.94777018, 0.94821996, 0.97271642, 1.0005331, 1.00257254, 1.00735428, 1.0198866, 1.04727644, 1.09182491, 1.09894488, 1.10078114, 1.10360265, 1.11904421, 1.12918186, 1.13765565, 1.18229212, 1.20436513, 1.21081849, 1.22066808, 1.22314962, 1.26854532, 1.30229203, 1.31703206, 1.32135388, 1.32907158, 1.33047318, 1.36227875, 1.39697511, 1.4242654, 1.4244518, 1.43681519, 1.4493789, 1.45152151, 1.46461455, 1.47733094, 1.48771515, 1.53536739, 1.54864524, 1.55283995, 1.5898638, 1.60887471, 1.64490284, 1.64502768, 1.66872741, 1.70025134, 1.71529419, 1.71851586, 1.75573609, 1.78231052, 1.8083983, 1.81541951, 1.81772587, 1.84818917, 1.85059323, 1.88875683, 1.90898902, 1.93557862, 1.9643203, 1.96710505, 1.98391057, 1.98527593, 2.03665079, 2.08708411, 2.08761721, 2.11103023, 2.12101666, 2.13992148, 2.17117369, 2.18684568, 2.22655021, 2.24875486, 2.24929527, 2.28056109, 2.28729401, 2.31258209, 2.32301025, 2.32477238, 2.32491974, 2.34173467, 2.35126611, 2.35149399, 2.38431406, 2.40687869, 2.42583741, 2.42797991, 2.42828893, 2.45838911, 2.46432188, 2.46473762, 2.47316229, 2.51085401, 2.5283335, 2.55848724, 2.56442768, 2.59182815, 2.60989243, 2.65008826, 2.67778032, 2.67781156, 2.68312729, 2.68929609, 2.70518959, 2.73459435, 2.78244226, 
2.78290087, 2.79595168, 2.80616739, 2.80701334, 2.81042141, 2.85470512, 2.87509772, 2.88886327, 2.89375791, 2.97284058, 2.97512514, 2.98540772, 3.01458122, 3.03159057, 3.05350786, 3.05518717, 3.10446297, 3.13792582, 3.15204826, 3.17267234, 3.19586531, 3.19657011, 3.21282816, 3.25677248, 3.27720176, 3.28887985, 3.29735282, 3.2982325, 3.32269346, 3.32343112, 3.32637092, 3.34520261, 3.34914751, 3.4176678, 3.43099532, 3.48336162, 3.48518715, 3.52127749, 3.52151362, 3.5773688, 3.59222194, 3.6013162, 3.62748155, 3.63613575, 3.64713969, 3.65456465, 3.66853991, 3.73818958, 3.74375182, 3.80164474, 3.86614106, 3.89385381, 3.97585319, 3.98647681, 4.00558264, 4.0212778, 4.05202117, 4.0594387, 4.09760178, 4.11367539, 4.12070204, 4.12999226, 4.15656723, 4.20514307, 4.27451413, 4.27635573, 4.28445258, 4.28533623, 4.33012486, 4.35620149, 4.37670464, 4.37681744, 4.39893272, 4.44981225, 4.45885746, 4.47979453, 4.48028014, 4.51009319, 4.52546144, 4.57879502, 4.66509915, 4.71338549, 4.71713202, 4.73567885, 4.75441602, 4.79556635, 4.79582663, 4.82047298, 4.82055109, 4.83059559, 4.83590133, 4.86399401, 4.87413277, 4.87833755, 4.89208783, 4.9192821, 4.941063, 4.98772884, 5.01993596, 5.02465223, 5.06293715, 5.06939498, 5.07198031, 5.11089343, 5.14112836, 5.15388206, 5.18105507, 5.19314929, 5.19670734, 5.22545792, 5.23334406, 5.23459961, 5.2494979, 5.2573258, 5.25908266, 5.2840583, 5.2853253, 5.28590158, 5.32314432, 5.35959824, 5.36241399, 5.38921977, 5.40694111, 5.4313708, 5.46598325, 5.47254526, 5.49387086, 5.49886878, 5.56592236, 5.57180461, 5.58869339, 5.58984367, 5.59601824, 5.62938579, 5.64426059, 5.6476461, 5.67241871, 5.6771723, 5.67873946, 5.68074113, 5.72312447, 5.7271727, 5.76271693, 5.79335885, 5.80349046, 5.83560725, 5.84101573, 5.85666574, 5.8643614, 5.86509986, 5.86531037, 5.87744489, 5.90506991, 5.91776312, 5.96371983, 5.96613482, 5.98032448, 5.98608614, 6.00144331, 6.00838531, 6.00846468, 6.01048934, 6.02474142, 6.0335397, 6.05113466, 6.06459963, 6.06576204, 6.08503265, 
6.10602749, 6.10606072, 6.22065498, 6.2532318, 6.29605114, 6.31945753, 6.35632236, 6.35896878, 6.36120413, 6.38709957, 6.39295197, 6.41809868, 6.42367352, 6.44628183, 6.47049815, 6.48133661, 6.49090302, 6.49289679, 6.50896993, 6.51693538, 6.54015486, 6.56308082, 6.568914, 6.57395747, 6.61319395, 6.63516058, 6.65665992, 6.66478415, 6.6710301, 6.67832287, 6.6987939, 6.69954116, 6.70655977, 6.72576878, 6.77771021, 6.77863482, 6.79102832, 6.81049338, 6.81235249, 6.81465697, 6.83783569, 6.84815101, 6.89710246, 6.98537525, 7.01954059, 7.02622255, 7.04976656, 7.07571722, 7.11728241, 7.13478378, 7.13478557, 7.16044495, 7.16456219, 7.19152888, 7.19978497, 7.22787642, 7.24906524, 7.25812186, 7.27034077, 7.30769391, 7.31820919, 7.35549295, 7.37285349, 7.37292834, 7.37424801, 7.3785301, 7.4196362, 7.42932103, 7.43036261, 7.45139091, 7.47555417, 7.50122532, 7.51360212, 7.51962212, 7.55560134, 7.58438748, 7.62698845, 7.64682633, 7.66868854, 7.6760022, 7.69020752, 7.7238978, 7.76340706, 7.76775711, 7.79077235, 7.79151683, 7.79383994, 7.80542945, 7.83695238, 7.85946794, 7.88079942, 7.96879553, 7.99422322, 7.99584892, 8.09873296, 8.17614594, 8.17763643, 8.18175172, 8.18778704, 8.22797549, 8.23708879, 8.28821888, 8.30281824, 8.30487238, 8.33078119, 8.33420872, 8.34305369, 8.38206152, 8.40403832, 8.41224886, 8.43463245, 8.44389971, 8.46044352, 8.48956655, 8.51149039, 8.51796916, 8.53329742, 8.53599617, 8.56068013, 8.56657166, 8.59814286, 8.61214071, 8.61498351, 8.64246675, 8.65762517, 8.66282683, 8.67384567, 8.71396613, 8.71416081, 8.73722558, 8.73767664, 8.74798782, 8.76129767, 8.76855011, 8.80085479, 8.86199255, 8.89862794, 8.93913818, 8.96782975, 8.9819441, 8.98865031, 9.00024566, 9.00610235, 9.01314955, 9.02095248, 9.03094763, 9.03668298, 9.04652449, 9.0490157, 9.05181691, 9.0646427, 9.1264005, 9.13361863, 9.14618518, 9.15534379, 9.16200272, 9.16524096, 9.19437442, 9.20198553, 9.20475517, 9.28953836, 9.32111331, 9.32181408, 9.32632133, 9.32969553, 9.4558735, 9.45868453, 
9.47407654, 9.52846898, 9.54261744, 9.55992241, 9.58831097, 9.59403646, 9.5989721, 9.63828129, 9.66338416, 9.67033722, 9.68634843, 9.7151767, 9.72467937, 9.76497421, 9.77592078, 9.78303691, 9.79368995, 9.7944104, 9.80563761, 9.82690855, 9.82845111, 9.87802691, 9.90843101, 9.91777335, 9.97014496, 9.9763017]) * pq.s, t_stop=10.0 * pq.s)] expected = sp.array([[0.0, 66.05735182], [66.05735182, 0.0]]) actual = stm.victor_purpura_dist(trains) assert_array_almost_equal(expected, actual) def test_allows_use_of_different_kernel(self): k = sigproc.LaplacianKernel(1.0 * pq.s, normalize=False) a = neo.SpikeTrain( sp.array([1.0, 2.0, 4.1, 7.0, 7.1]) * pq.s, t_stop=8.0 * pq.s) b = neo.SpikeTrain( sp.array([4.0, 4.3, 1.2]) * pq.s, t_stop=8.0 * pq.s) # From a to b: # - shift 1.0 to 1.2 (cost 0.3625385) # - delete 2.0 (cost 1.0) # - shift 4.1 to 4.0 (cost 0.1903252) # - shift 4.3 to 7.0 (cost 1.8655890) # - delete 7.0 (cost 1.0) # - delete 7.1 (cost 1.0) expected = sp.array([[0.0, 4.4184526], [4.4184526, 0.0]]) assert_array_almost_equal( expected, stm.victor_purpura_dist([a, b], kernel=k)) def test_allows_q_to_be_zero(self): q = 0.0 a = neo.SpikeTrain( sp.array([1.0, 2.0, 4.1, 7.0, 7.1]) * pq.s, t_stop=8.0 * pq.s) b = neo.SpikeTrain( sp.array([1.2, 4.0, 4.3]) * pq.s, t_stop=8.0 * pq.s) # Pure rate code expected = sp.array([[0.0, 2.0], [2.0, 0.0]]) assert_array_almost_equal(expected, stm.victor_purpura_dist([a, b], q)) class Test_victor_purpura_multiunit_dist(ut.TestCase, CommonMetricTestCases): # With only one spike train each we should get the normal VP distance. 
def calc_metric(self, trains): return stm.victor_purpura_multiunit_dist({0: trains}, 1) def test_returns_correct_distance_for_multiunits(self): a0 = neo.SpikeTrain(sp.array([1.0, 5.0, 7.0]) * pq.s, t_stop=8.0 * pq.s) a1 = neo.SpikeTrain(sp.array([1.0, 2.0, 5.0]) * pq.s, t_stop=8.0 * pq.s) b0 = neo.SpikeTrain(sp.array([2.0, 4.0, 5.0]) * pq.s, t_stop=8.0 * pq.s) b1 = neo.SpikeTrain(sp.array([3.0, 8.0]) * pq.s, t_stop=9.0 * pq.s) units = {0: [a0, a1], 1: [b0, b1]} reassignment_cost = 0.7 expected = sp.array([[0.0, 4.4], [4.4, 0.0]]) actual = stm.victor_purpura_multiunit_dist(units, reassignment_cost) assert_array_almost_equal(expected, actual) def test_returns_empty_array_if_empty_dict_is_passed(self): expected = sp.zeros((0, 0)) actual = stm.victor_purpura_multiunit_dist({}, 1.0) assert_array_equal(expected, actual) def test_returns_empty_array_if_trials_are_empty(self): expected = sp.zeros((0, 0)) actual = stm.victor_purpura_multiunit_dist({0: [], 1: []}, 1.0) assert_array_equal(expected, actual) def test_raises_exception_if_number_of_trials_differs(self): st = create_empty_spike_train() with self.assertRaises(ValueError): stm.victor_purpura_multiunit_dist({0: [st], 1: [st, st]}, 1.0) if __name__ == '__main__': ut.main() spykeutils-0.4.3/spykeutils/tests/test_tools.py0000644000175000017500000004451412664623646020236 0ustar robrobtry: import unittest2 as ut assert ut # Suppress pyflakes warning about redefinition of unused ut except ImportError: import unittest as ut from builders import arange_spikes from numpy.testing import assert_array_equal, assert_array_almost_equal from spykeutils import tools import neo import neo.io.tools import neo.test.tools import quantities as pq import scipy as sp class TestApplyToDict(ut.TestCase): @staticmethod def fn(train, multiplier=1): return multiplier * train.size def test_maps_function_to_each_spike_train(self): st_dict = {'a': [arange_spikes(5 * pq.s), arange_spikes(4 * pq.s)], 'b': [arange_spikes(7 * pq.s)]} expected = {'a': 
[4, 3], 'b': [6]} actual = tools.apply_to_dict(self.fn, st_dict) self.assertEqual(expected, actual) def test_works_on_empty_lists(self): st_dict = {'a': [], 'b': []} expected = {'a': [], 'b': []} actual = tools.apply_to_dict(self.fn, st_dict) self.assertEqual(expected, actual) def test_works_on_empty_dict(self): st_dict = {} expected = {} actual = tools.apply_to_dict(self.fn, st_dict) self.assertEqual(expected, actual) def test_allows_to_pass_additional_args(self): st_dict = {'a': [arange_spikes(5 * pq.s), arange_spikes(4 * pq.s)], 'b': [arange_spikes(7 * pq.s)]} expected = {'a': [8, 6], 'b': [12]} actual = tools.apply_to_dict(self.fn, st_dict, 2) self.assertEqual(expected, actual) class TestBinSpikeTrains(ut.TestCase): def test_bins_spike_train_using_its_properties(self): a = neo.SpikeTrain( sp.array([1000.0]) * pq.ms, t_start=500.0 * pq.ms, t_stop=1500.0 * pq.ms) sampling_rate = 4.0 * pq.Hz expected = {0: [sp.array([0, 0, 1, 0])]} expectedBins = sp.array([0.5, 0.75, 1.0, 1.25, 1.5]) * pq.s actual, actualBins = tools.bin_spike_trains({0: [a]}, sampling_rate) self.assertEqual(len(expected), len(actual)) self.assertEqual(len(expected[0]), len(actual[0])) assert_array_equal(expected[0][0], actual[0][0]) assert_array_almost_equal( expectedBins, actualBins.rescale(expectedBins.units)) def test_bins_spike_train_using_passed_properties(self): a = neo.SpikeTrain( sp.array([1.0]) * pq.s, t_start=0.0 * pq.s, t_stop=5.0 * pq.s) sampling_rate = 4.0 * pq.Hz t_start = 0.5 * pq.s t_stop = 1.5 * pq.s expected = {0: [sp.array([0, 0, 1, 0])]} expectedBins = sp.array([0.5, 0.75, 1.0, 1.25, 1.5]) * pq.s actual, actualBins = tools.bin_spike_trains( {0: [a]}, sampling_rate=sampling_rate, t_start=t_start, t_stop=t_stop) self.assertEqual(len(expected), len(actual)) self.assertEqual(len(expected[0]), len(actual[0])) assert_array_equal(expected[0][0], actual[0][0]) assert_array_almost_equal( expectedBins, actualBins.rescale(expectedBins.units)) def test_uses_max_spike_train_interval(self): 
a = arange_spikes(5 * pq.s) b = arange_spikes(7 * pq.s, 15 * pq.s) sampling_rate = 4.0 * pq.Hz expectedBins = sp.arange(0.0, 15.1, 0.25) * pq.s actual, actualBins = tools.bin_spike_trains( {0: [a, b]}, sampling_rate=sampling_rate) assert_array_almost_equal( expectedBins, actualBins.rescale(expectedBins.units)) def test_handles_bin_size_which_is_not_divisor_of_duration(self): a = arange_spikes(5 * pq.s) sampling_rate = 1.0 / 1.3 * pq.Hz expected = {0: [sp.array([1, 1, 1, 1])]} expectedBins = sp.array([0.0, 1.3, 2.6, 3.9, 5.2]) * pq.s actual, actualBins = tools.bin_spike_trains({0: [a]}, sampling_rate) self.assertEqual(len(expected), len(actual)) self.assertEqual(len(expected[0]), len(actual[0])) assert_array_equal(expected[0][0], actual[0][0]) assert_array_almost_equal( expectedBins, actualBins.rescale(expectedBins.units)) class TestConcatenateSpikeTrains(ut.TestCase): def test_concatenates_spike_trains(self): a = arange_spikes(3.0 * pq.s) b = arange_spikes(2.0 * pq.s, 5.0 * pq.s) expected = arange_spikes(5.0 * pq.s) actual = tools.concatenate_spike_trains((a, b)) assert_array_almost_equal(expected, actual) def test_t_start_is_min_of_all_trains(self): a = arange_spikes(3.0 * pq.s, 5.0 * pq.s) b = arange_spikes(1.0 * pq.s, 6.0 * pq.s) expected = 1.0 * pq.s actual = tools.concatenate_spike_trains((a, b)).t_start self.assertAlmostEqual(expected, actual) def test_t_stop_is_max_of_all_trains(self): a = arange_spikes(3.0 * pq.s, 5.0 * pq.s) b = arange_spikes(1.0 * pq.s, 6.0 * pq.s) expected = 6.0 * pq.s actual = tools.concatenate_spike_trains((a, b)).t_stop self.assertAlmostEqual(expected, actual) class TestRemoveFromHierarchy(ut.TestCase): SEGMENTS = 5 CHANNEL_GROUPS = 4 UNITS = 3 CHANNELS = 4 @classmethod def create_hierarchy(cls, many_to_many): b = neo.Block() for ns in range(cls.SEGMENTS): b.segments.append(neo.Segment()) channels = [] if many_to_many: channels = [neo.RecordingChannel(name='Shared %d' % i, index=i + cls.CHANNELS) for i in range(cls.CHANNELS / 2)] for 
ng in range(cls.CHANNEL_GROUPS): rcg = neo.RecordingChannelGroup() for nu in range(cls.UNITS): unit = neo.Unit() for ns in range(cls.SEGMENTS): spike = neo.Spike(0 * pq.s) unit.spikes.append(spike) b.segments[ns].spikes.append(spike) st = neo.SpikeTrain([] * pq.s, 0 * pq.s) unit.spiketrains.append(st) b.segments[ns].spiketrains.append(st) rcg.units.append(unit) if not many_to_many: for nc in range(cls.CHANNELS): rc = neo.RecordingChannel( name='Single %d' % nc, index=nc) rc.recordingchannelgroups.append(rcg) rcg.recordingchannels.append(rc) else: for nc in range(cls.CHANNELS): if nc % 2 == 0: rc = neo.RecordingChannel( name='Single %d' % (nc / 2), index=nc / 2) else: rc = channels[nc / 2] rc.recordingchannelgroups.append(rcg) rcg.recordingchannels.append(rc) rcg.channel_indexes = sp.array( [c.index for c in rcg.recordingchannels]) rcg.channel_names = sp.array( [c.name for c in rcg.recordingchannels]) b.recordingchannelgroups.append(rcg) try: neo.io.tools.create_many_to_one_relationship(b) except AttributeError: b.create_many_to_one_relationship() return b def test_remove_block(self): block = self.create_hierarchy(False) comp = self.create_hierarchy(False) tools.remove_from_hierarchy(block) neo.test.tools.assert_same_sub_schema(block, comp) def test_remove_segment_no_orphans(self): block = self.create_hierarchy(False) comp = self.create_hierarchy(False) seg = block.segments[1] tools.remove_from_hierarchy(seg) self.assertFalse(seg in block.segments) self.assertEqual(len(block.list_units), self.UNITS * self.CHANNEL_GROUPS) for u in block.list_units: self.assertEqual(len(u.spikes), self.SEGMENTS - 1) self.assertEqual(len(u.spiketrains), self.SEGMENTS - 1) neo.test.tools.assert_same_sub_schema(seg, comp.segments[1]) def test_remove_segment_keep_orphans(self): block = self.create_hierarchy(False) comp = self.create_hierarchy(False) seg = block.segments[1] tools.remove_from_hierarchy(seg, False) self.assertFalse(seg in block.segments) 
self.assertEqual(len(block.list_units), self.UNITS * self.CHANNEL_GROUPS) for u in block.list_units: self.assertEqual(len(u.spikes), self.SEGMENTS) self.assertEqual(len(u.spiketrains), self.SEGMENTS) neo.test.tools.assert_same_sub_schema(seg, comp.segments[1]) def test_remove_channel_group_no_orphans(self): block = self.create_hierarchy(False) comp = self.create_hierarchy(False) rcg = block.recordingchannelgroups[1] tools.remove_from_hierarchy(rcg) self.assertFalse(rcg in block.recordingchannelgroups) self.assertEqual(len(block.segments), self.SEGMENTS) for s in block.segments: self.assertEqual(len(s.spikes), self.UNITS * (self.CHANNEL_GROUPS - 1)) self.assertEqual(len(s.spiketrains), self.UNITS * (self.CHANNEL_GROUPS - 1)) neo.test.tools.assert_same_sub_schema(rcg, comp.recordingchannelgroups[1]) def test_remove_channel_group_keep_orphans(self): block = self.create_hierarchy(False) comp = self.create_hierarchy(False) rcg = block.recordingchannelgroups[1] tools.remove_from_hierarchy(rcg, False) self.assertFalse(rcg in block.recordingchannelgroups) self.assertEqual(len(block.segments), self.SEGMENTS) for s in block.segments: self.assertEqual(len(s.spikes), self.UNITS * self.CHANNEL_GROUPS) self.assertEqual(len(s.spiketrains), self.UNITS * self.CHANNEL_GROUPS) neo.test.tools.assert_same_sub_schema(rcg, comp.recordingchannelgroups[1]) def test_remove_channel(self): block = self.create_hierarchy(False) comp = self.create_hierarchy(False) rc = block.list_recordingchannels[5] tools.remove_from_hierarchy(rc) self.assertFalse(rc in block.list_recordingchannels) neo.test.tools.assert_same_sub_schema(rc, comp.list_recordingchannels[5]) self.assertEqual(len(block.segments), self.SEGMENTS) self.assertEqual(len(block.recordingchannelgroups), self.CHANNEL_GROUPS) self.assertEqual(len(block.list_recordingchannels), self.CHANNEL_GROUPS * self.CHANNELS - 1) # Should be removed from its own channel group rcg = rc.recordingchannelgroups[0] self.assertEqual(len(rcg.recordingchannels), 
self.CHANNELS - 1) self.assertEqual(rcg.channel_indexes.shape[0], self.CHANNELS - 1) self.assertEqual(rcg.channel_names.shape[0], self.CHANNELS - 1) self.assertFalse(rc.index in rcg.channel_indexes) self.assertFalse(rc.name in rcg.channel_names) def test_remove_unique_channel_many_to_many(self): block = self.create_hierarchy(True) comp = self.create_hierarchy(True) self.assertEqual( len(block.list_recordingchannels), self.CHANNEL_GROUPS * (self.CHANNELS / 2) + (self.CHANNELS / 2)) rc = block.list_recordingchannels[0] # Unique channel tools.remove_from_hierarchy(rc) neo.test.tools.assert_same_sub_schema(rc, comp.list_recordingchannels[0]) self.assertFalse(rc in block.list_recordingchannels) self.assertEqual(len(block.segments), self.SEGMENTS) self.assertEqual(len(block.recordingchannelgroups), self.CHANNEL_GROUPS) self.assertEqual( len(block.list_recordingchannels), self.CHANNEL_GROUPS * (self.CHANNELS / 2) + (self.CHANNELS / 2) - 1) # Should be removed from its own channel group rcg = rc.recordingchannelgroups[0] self.assertEqual(len(rcg.recordingchannels), self.CHANNELS - 1) self.assertEqual(rcg.channel_indexes.shape[0], self.CHANNELS - 1) self.assertEqual(rcg.channel_names.shape[0], self.CHANNELS - 1) self.assertFalse(rc.index in rcg.channel_indexes) self.assertFalse(rc.name in rcg.channel_names) def test_remove_shared_channel_many_to_many(self): block = self.create_hierarchy(True) comp = self.create_hierarchy(True) self.assertEqual( len(block.list_recordingchannels), self.CHANNEL_GROUPS * (self.CHANNELS / 2) + (self.CHANNELS / 2)) rc = block.list_recordingchannels[1] # Shared channel tools.remove_from_hierarchy(rc) neo.test.tools.assert_same_sub_schema(rc, comp.list_recordingchannels[1]) self.assertFalse(rc in block.list_recordingchannels) self.assertEqual(len(block.segments), self.SEGMENTS) self.assertEqual(len(block.recordingchannelgroups), self.CHANNEL_GROUPS) self.assertEqual( len(block.list_recordingchannels), self.CHANNEL_GROUPS * (self.CHANNELS / 2) + 
(self.CHANNELS / 2) - 1) # Should be removed from all channel groups for rcg in block.recordingchannelgroups: self.assertEqual(len(rcg.recordingchannels), self.CHANNELS - 1) self.assertEqual(rcg.channel_indexes.shape[0], self.CHANNELS - 1) self.assertEqual(rcg.channel_names.shape[0], self.CHANNELS - 1) self.assertFalse(rc.index in rcg.channel_indexes) self.assertFalse(rc.name in rcg.channel_names) def test_remove_unit_no_orphans(self): block = self.create_hierarchy(False) comp = self.create_hierarchy(False) unit = block.list_units[5] tools.remove_from_hierarchy(unit) self.assertFalse(unit in block.list_units) self.assertEqual(len(block.list_units), self.UNITS * self.CHANNEL_GROUPS - 1) self.assertEqual(len(block.segments), self.SEGMENTS) self.assertEqual(len(block.recordingchannelgroups), self.CHANNEL_GROUPS) for seg in block.segments: self.assertEqual(len(seg.spikes), self.UNITS * self.CHANNEL_GROUPS - 1) self.assertEqual(len(seg.spiketrains), self.UNITS * self.CHANNEL_GROUPS - 1) self.assertFalse(unit in [s.unit for s in seg.spikes]) self.assertFalse(unit in [st.unit for st in seg.spiketrains]) neo.test.tools.assert_same_sub_schema(unit, comp.list_units[5]) def test_remove_unit_keep_orphans(self): block = self.create_hierarchy(False) comp = self.create_hierarchy(False) unit = block.list_units[5] tools.remove_from_hierarchy(unit, False) self.assertFalse(unit in block.list_units) self.assertEqual(len(block.list_units), self.UNITS * self.CHANNEL_GROUPS - 1) self.assertEqual(len(block.segments), self.SEGMENTS) self.assertEqual(len(block.recordingchannelgroups), self.CHANNEL_GROUPS) for seg in block.segments: self.assertEqual(len(seg.spikes), self.UNITS * self.CHANNEL_GROUPS) self.assertEqual(len(seg.spiketrains), self.UNITS * self.CHANNEL_GROUPS) self.assertFalse(unit in [s.unit for s in seg.spikes]) self.assertFalse(unit in [st.unit for st in seg.spiketrains]) neo.test.tools.assert_same_sub_schema(unit, comp.list_units[5]) def test_remove_spike(self): unit = 
neo.Unit() segment = neo.Segment() s = neo.Spike(0 * pq.s) unit.spikes.append(s) segment.spikes.append(s) s.unit = unit s.segment = segment st = neo.SpikeTrain([] * pq.s, 0 * pq.s) unit.spiketrains.append(st) segment.spiketrains.append(st) st.unit = unit st.segment = segment tools.remove_from_hierarchy(s) self.assertTrue(st in unit.spiketrains) self.assertTrue(st in segment.spiketrains) self.assertFalse(s in unit.spikes) self.assertFalse(s in segment.spikes) def test_remove_spiketrain(self): unit = neo.Unit() segment = neo.Segment() s = neo.Spike(0 * pq.s) unit.spikes.append(s) segment.spikes.append(s) s.unit = unit s.segment = segment st = neo.SpikeTrain([] * pq.s, 0 * pq.s) unit.spiketrains.append(st) segment.spiketrains.append(st) st.unit = unit st.segment = segment tools.remove_from_hierarchy(st) self.assertTrue(s in unit.spikes) self.assertTrue(s in segment.spikes) self.assertFalse(st in unit.spiketrains) self.assertFalse(st in segment.spiketrains) def test_extract_spikes(self): s1 = sp.zeros(10000) s2 = sp.ones(10000) t = sp.arange(0.0, 10.1, 1.0) sig1 = neo.AnalogSignal(s1 * pq.uV, sampling_rate=pq.kHz) sig2 = neo.AnalogSignal(s2 * pq.uV, sampling_rate=pq.kHz) train = neo.SpikeTrain(t * pq.s, 10 * pq.s) spikes = tools.extract_spikes( train, [sig1, sig2], 100 * pq.ms, 10 * pq.ms) self.assertEqual(len(spikes), 9) for s in spikes: self.assertAlmostEqual(s.waveform[:, 0].mean(), 0.0) self.assertAlmostEqual(s.waveform[:, 1].mean(), 1.0) def test_extract_different_spikes(self): s1 = sp.ones(10500) s2 = -sp.ones(10500) for i in xrange(10): s1[i * 1000 + 500:i * 1000 + 1500] *= i s2[i * 1000 + 500:i * 1000 + 1500] *= i t = sp.arange(0.0, 10.1, 1.0) sig1 = neo.AnalogSignal(s1 * pq.uV, sampling_rate=pq.kHz) sig2 = neo.AnalogSignal(s2 * pq.uV, sampling_rate=pq.kHz) train = neo.SpikeTrain(t * pq.s, 10 * pq.s) spikes = tools.extract_spikes( train, [sig1, sig2], 100 * pq.ms, 10 * pq.ms) self.assertEqual(len(spikes), 10) for i, s in enumerate(spikes): 
class CommonSpikeTrainGeneratorTests(object):
    """ Shared test cases which every spike train generation function is
    expected to pass. Mix this into a ``TestCase`` subclass and implement
    :func:`invoke_gen_func`. """

    defaultRate = 10 * pq.Hz
    lowRate = 1 * pq.Hz
    highRate = 10000 * pq.Hz

    def invoke_gen_func(self, rate, **kwargs):
        """ Called by the test cases to generate a spike train. """
        raise NotImplementedError()

    def test_returns_SpikeTrain_containing_spikes(self):
        train = self.invoke_gen_func(self.defaultRate, t_stop=100 * pq.s)
        self.assertIsInstance(train, neo.SpikeTrain)
        self.assertTrue(train.size > 0)

    def test_exception_without_end_condition(self):
        # Without t_stop (or max_spikes) the generation can never stop.
        with self.assertRaises(ValueError):
            self.invoke_gen_func(self.defaultRate, t_stop=None)

    def test_times_limited_by_t_start_and_t_stop(self):
        t_start = 10 * pq.s
        t_stop = 20 * pq.s
        train = self.invoke_gen_func(
            self.defaultRate, t_start=t_start, t_stop=t_stop)
        self.assertTrue(sp.all(t_start < train))
        self.assertTrue(sp.all(train <= t_stop))
        self.assertEqual(t_start, train.t_start)
        self.assertEqual(t_stop, train.t_stop)

    def test_num_spikes_limited_by_max_spike(self):
        max_spikes = 10
        # A high rate provokes more than `max_spikes` spikes.
        self.assertTrue(
            max_spikes >= self.invoke_gen_func(
                self.highRate, max_spikes=max_spikes).size)
        # A long trial length provokes more than `max_spikes` spikes.
        self.assertTrue(
            max_spikes >= self.invoke_gen_func(
                self.lowRate, t_stop=10000 * pq.s,
                max_spikes=max_spikes).size)

    def test_respects_refractory_period(self):
        refractory = 100 * pq.ms
        # NOTE(review): these assertions compare the *largest* inter-spike
        # interval (sp.amax) against the refractory period; the stricter
        # check would use the smallest interval (sp.amin) -- confirm intent.
        train = self.invoke_gen_func(
            self.highRate, max_spikes=1000, refractory=refractory)
        self.assertGreater(
            sp.amax(sp.absolute(sp.diff(train.rescale(pq.s).magnitude))),
            refractory.rescale(pq.s).magnitude)
        train = self.invoke_gen_func(
            self.highRate, t_stop=10 * pq.s, refractory=refractory)
        self.assertGreater(
            sp.amax(sp.absolute(sp.diff(train.rescale(pq.s).magnitude))),
            refractory.rescale(pq.s).magnitude)


class Test_gen_homogeneous_poisson(
        ut.TestCase, CommonSpikeTrainGeneratorTests):
    def invoke_gen_func(self, rate, **kwargs):
        return stg.gen_homogeneous_poisson(rate, **kwargs)


class Test_gen_inhomogeneous_poisson(
        ut.TestCase, CommonSpikeTrainGeneratorTests):
    def invoke_gen_func(self, max_rate, **kwargs):
        modulation = lambda ts: sp.sin(ts / (5.0 * pq.s) * sp.pi)
        return stg.gen_inhomogeneous_poisson(modulation, max_rate, **kwargs)


if __name__ == '__main__':
    ut.main()
class TestClusterOverlap(ut.TestCase):
    def setUp(self):
        # Fixed seed so the randomized tests are deterministic.
        self.rand = np.random.RandomState()
        self.rand.seed(1704)

    def _columns(self, mat):
        """ Return the columns of `mat` as a list of 1-D vectors. """
        return [mat[:, i] for i in xrange(sp.size(mat, 1))]

    def test_1_cluster(self):
        # A single cluster cannot have overlaps with anything.
        waves = self._columns(self.rand.randn(8, 100))
        total, pair = qa.overlap_fp_fn({1: waves})
        self.assertEqual(total[1][0], 0.0)
        self.assertEqual(total[1][1], 0.0)
        self.assertEqual(pair, {})

    def test_equal_clusters_white(self):
        waves1 = self._columns(self.rand.randn(40, 1000))
        waves2 = self._columns(self.rand.randn(40, 1000))
        total, pair = qa.overlap_fp_fn(
            {1: waves1, 2: waves2},
            means={1: sp.zeros(40), 2: sp.zeros(40)},
            covariances='white')
        for unit in (1, 2):
            self.assertAlmostEqual(total[unit][0], 0.5)
            self.assertAlmostEqual(total[unit][1], 0.5)
        self.assertAlmostEqual(pair[1][2][0], 0.5)
        self.assertAlmostEqual(pair[1][2][1], 0.5)
        self.assertAlmostEqual(pair[2][1][0], 0.5)
        self.assertAlmostEqual(pair[2][1][1], 0.5)

    def test_equal_clusters_estimate_mean(self):
        # Smaller dimensionality and more data for reliable estimates.
        waves1 = self._columns(self.rand.randn(8, 100000))
        waves2 = self._columns(self.rand.randn(8, 100000))
        total, pair = qa.overlap_fp_fn(
            {1: waves1, 2: waves2},
            means={1: sp.zeros(8), 2: sp.zeros(8)})
        for unit in (1, 2):
            self.assertAlmostEqual(total[unit][0], 0.5, 3)
            self.assertAlmostEqual(total[unit][1], 0.5, 3)
        self.assertAlmostEqual(pair[1][2][0], 0.5, 3)
        self.assertAlmostEqual(pair[1][2][1], 0.5, 3)
        self.assertAlmostEqual(pair[2][1][0], 0.5, 3)
        self.assertAlmostEqual(pair[2][1][1], 0.5, 3)

    def test_equal_clusters_estimate_all(self):
        # Smaller dimensionality and more data for reliable estimates.
        waves1 = self._columns(self.rand.randn(8, 100000))
        waves2 = self._columns(self.rand.randn(8, 100000))
        total, pair = qa.overlap_fp_fn({1: waves1, 2: waves2})
        for unit in (1, 2):
            self.assertAlmostEqual(total[unit][0], 0.5, 2)
            self.assertAlmostEqual(total[unit][1], 0.5, 2)
        self.assertAlmostEqual(pair[1][2][0], 0.5, 2)
        self.assertAlmostEqual(pair[1][2][1], 0.5, 2)
        self.assertAlmostEqual(pair[2][1][0], 0.5, 2)
        self.assertAlmostEqual(pair[2][1][1], 0.5, 2)

    def test_unequal_clusters(self):
        waves1 = self._columns(self.rand.randn(40, 1000))
        waves2 = self._columns(self.rand.randn(40, 2000))
        total, pair = qa.overlap_fp_fn(
            {1: waves1, 2: waves2},
            {1: sp.zeros(40), 2: sp.zeros(40)},
            {1: sp.eye(40), 2: sp.eye(40)})
        self.assertAlmostEqual(total[1][0], 2.0 / 3.0)
        self.assertAlmostEqual(total[1][1], 2.0 / 3.0)
        self.assertAlmostEqual(total[2][0], 1.0 / 3.0)
        self.assertAlmostEqual(total[2][1], 1.0 / 3.0)
        self.assertAlmostEqual(pair[1][2][0], 2.0 / 3.0)
        self.assertAlmostEqual(pair[1][2][1], 2.0 / 3.0)
        self.assertAlmostEqual(pair[2][1][0], 1.0 / 3.0)
        self.assertAlmostEqual(pair[2][1][1], 1.0 / 3.0)

    def test_far_apart_clusters_estimate_all(self):
        cluster1 = self.rand.randn(40, 10000)
        cluster2 = self.rand.randn(40, 10000) * 2
        cluster2[0, :] += 10
        total, pair = qa.overlap_fp_fn(
            {1: self._columns(cluster1), 2: self._columns(cluster2)})
        # Overlap errors must be tiny but strictly positive.
        for err in (total[1], total[2], pair[1][2], pair[2][1]):
            self.assertLess(err[0], 1e-4)
            self.assertLess(err[1], 1e-4)
            self.assertGreater(err[0], 0.0)
            self.assertGreater(err[1], 0.0)

    def test_3_clusters_estimate_means(self):
        cluster1 = self.rand.randn(20, 10000)
        cluster2 = self.rand.randn(20, 20000)
        cluster3 = self.rand.randn(20, 10000)
        cluster3[5, :] += 11
        total, pair = qa.overlap_fp_fn(
            {1: self._columns(cluster1), 2: self._columns(cluster2),
             3: self._columns(cluster3)},
            covariances={1: sp.eye(20), 2: sp.eye(20), 3: sp.eye(20) * 1.5})
        self.assertAlmostEqual(total[1][0], 2.0 / 3.0, 2)
        self.assertAlmostEqual(total[1][1], 2.0 / 3.0, 2)
        self.assertAlmostEqual(total[2][0], 1.0 / 3.0, 2)
        self.assertAlmostEqual(total[2][1], 1.0 / 3.0, 2)
        self.assertAlmostEqual(pair[1][2][0], 2.0 / 3.0, 2)
        self.assertAlmostEqual(pair[1][2][1], 2.0 / 3.0, 2)
        self.assertAlmostEqual(pair[2][1][0], 1.0 / 3.0, 2)
        self.assertAlmostEqual(pair[2][1][1], 1.0 / 3.0, 2)
        # Cluster 3 is far away: errors tiny but strictly positive.
        for err in (total[3], pair[1][3], pair[2][3],
                    pair[3][1], pair[3][2]):
            self.assertLess(err[0], 1e-4)
            self.assertLess(err[1], 1e-4)
            self.assertGreater(err[0], 0.0)
            self.assertGreater(err[1], 0.0)

    def test_spike_objects(self):
        dimension = 40
        offset = sp.zeros((dimension, 1))
        offset[0] = 4
        cluster1 = self.rand.randn(dimension, 10)
        cluster2 = self.rand.randn(dimension, 100) + offset
        cluster3 = self.rand.randn(dimension, 500) - offset
        waves1 = self._columns(cluster1)
        waves2 = self._columns(cluster2)
        waves3 = self._columns(cluster3)
        mean1 = sp.zeros(dimension)
        mean2 = offset.flatten()
        mean3 = -mean2
        total, pair = qa.overlap_fp_fn(
            {1: waves1, 2: waves2, 3: waves3},
            means={1: mean1, 2: mean2, 3: mean3},
            covariances={1: sp.eye(dimension), 2: sp.eye(dimension),
                         3: sp.eye(dimension)})

        # Replace some arrays with Spike objects (different units on
        # purpose, to exercise unit handling).
        mean2 = neo.Spike(waveform=mean2.reshape(-1, 4) / 1000.0 * pq.mV)
        mean3 = neo.Spike(waveform=mean3.reshape(-1, 4) / 1e6 * pq.V)
        spike_waves = [
            neo.Spike(waveform=w.reshape(-1, 4) / 1000.0 * pq.mV)
            for w in waves1]
        total_s, pair_s = qa.overlap_fp_fn(
            {1: spike_waves, 2: waves2, 3: waves3},
            means={1: mean1, 2: mean2, 3: mean3},
            covariances='white')

        # Arrays and Spike objects must yield identical results.
        for i in total.keys():
            self.assertAlmostEqual(total[i][0], total_s[i][0])
            self.assertAlmostEqual(total[i][1], total_s[i][1])
            for j in pair[i].keys():
                self.assertAlmostEqual(pair[i][j][0], pair_s[i][j][0])
                self.assertAlmostEqual(pair[i][j][1], pair_s[i][j][1])


class TestVarianceExplained(ut.TestCase):
    def test_trains(self):
        trains = {}
        trains[0] = neo.SpikeTrain(
            sp.zeros(2) * pq.ms, 0 * pq.ms,
            waveforms=[[[-1, -2], [1, 2]], [[-1, -2], [1, 2]]] * pq.mV)
        trains[1] = trains[0]
        means = {}
        means[1] = neo.Spike(0 * pq.ms, waveform=[[-1, -1], [1, 1]] * pq.mV)
        exp = qa.variance_explained(trains, means)
        # NOTE(review): exp[0][0] is asserted twice; the second assertion
        # was probably meant for exp[0][1] -- confirm before changing.
        self.assertAlmostEqual(exp[0][0], 1)
        self.assertAlmostEqual(exp[0][0], 1)
        self.assertAlmostEqual(exp[1][0], 1)
        self.assertAlmostEqual(exp[1][1], 0.75)

    def test_spikes(self):
        trains = {}
        trains[0] = [
            neo.Spike(0 * pq.ms, waveform=[[-1, -2], [1, 2]] * pq.mV),
            neo.Spike(0 * pq.ms, waveform=[[-1, -2], [1, 2]] * pq.mV)]
        trains[1] = trains[0]
        means = {}
        means[0] = neo.Spike(0 * pq.ms, waveform=[[-1, -1], [1, 1]] * pq.mV)
        exp = qa.variance_explained(trains, means)
        # NOTE(review): exp[1][0] is asserted twice; the second assertion
        # was probably meant for exp[1][1] -- confirm before changing.
        self.assertAlmostEqual(exp[1][0], 1)
        self.assertAlmostEqual(exp[1][0], 1)
        self.assertAlmostEqual(exp[0][0], 1)
        self.assertAlmostEqual(exp[0][1], 0.75)

    def test_noise(self):
        trains = {}
        trains[0] = neo.SpikeTrain(
            sp.zeros(2) * pq.ms, 0 * pq.ms,
            waveforms=[[[-1, -2], [1, 2]], [[-1, -2], [1, 2]]] * pq.mV)
        trains[1] = trains[0]
        means = {}
        means[1] = neo.Spike(0 * pq.ms, waveform=[[-1, -1], [1, 1]] * pq.mV)
        noise = [0, 1] * pq.mV ** 2
        exp = qa.variance_explained(trains, means, noise)
        # NOTE(review): exp[0][0] is asserted twice -- see test_trains.
        self.assertAlmostEqual(exp[0][0], 1)
        self.assertAlmostEqual(exp[0][0], 1)
        self.assertAlmostEqual(exp[1][0], 1)
        self.assertAlmostEqual(exp[1][1], 1)


if __name__ == '__main__':
    ut.main()
class TestKernel(ut.TestCase):
    def _mocked_kernel(self, size, normalize):
        """ Kernel with a mocked _evaluate returning 42. """
        kernel = sigproc.Kernel(size, normalize=normalize)
        kernel._evaluate = MagicMock(name='_evaluate')
        kernel._evaluate.return_value = 42
        return kernel

    def test_call_returns_result_of_evaluate(self):
        size = 1.3
        kernel = self._mocked_kernel(size, normalize=False)
        t = 3.3
        result = kernel(t)
        kernel._evaluate.assert_called_with(t, size)
        self.assertEqual(kernel._evaluate.return_value, result)

    def test_call_can_normalize_evaluate(self):
        size = 1.3
        kernel = self._mocked_kernel(size, normalize=True)
        kernel.normalization_factor = MagicMock(name='normalization_factor')
        kernel.normalization_factor.return_value = 0.5
        t = 3.3
        result = kernel(t)
        kernel._evaluate.assert_called_with(t, size)
        kernel.normalization_factor.assert_called_with(size)
        self.assertEqual(
            kernel.normalization_factor.return_value *
            kernel._evaluate.return_value, result)

    def test_call_can_overwrite_kernel_size(self):
        size = 1.3
        kernel = self._mocked_kernel(5.6, normalize=True)
        kernel.normalization_factor = MagicMock(name='normalization_factor')
        kernel.normalization_factor.return_value = 0.5
        t = 3.3
        result = kernel(t, size)
        kernel._evaluate.assert_called_with(t, size)
        kernel.normalization_factor.assert_called_with(size)
        self.assertEqual(
            kernel.normalization_factor.return_value *
            kernel._evaluate.return_value, result)

    def test_summed_dist_matrix(self):
        kernel = sigproc.Kernel(1.0, normalize=False)
        kernel._evaluate = lambda t, _: t
        vectors = [sp.array([2.0, 1.0, 3.0]), sp.array([1.5, 4.0])]
        expected = sp.array([[0.0, -4.5], [4.5, 0.0]])
        assert_array_almost_equal(
            expected, kernel.summed_dist_matrix(vectors))

    def test_summed_dist_matrix_with_units(self):
        kernel = sigproc.Kernel(2000 * pq.ms, normalize=False)
        kernel._evaluate = lambda t, size: 1.0 / size / size * t
        vectors = [sp.array([2.0, 1.0, 3.0]) * pq.s,
                   sp.array([1500, 4000]) * pq.ms]
        expected = sp.array([[0.0, -1.125], [1.125, 0.0]]) / pq.s
        actual = kernel.summed_dist_matrix(vectors)
        assert_array_almost_equal(expected, actual.rescale(1.0 / pq.s))


class TestSymmetricKernel(ut.TestCase):
    # NOTE(review): these tests instantiate sigproc.Kernel rather than a
    # symmetric kernel class -- confirm this matches the intent.
    def test_summed_dist_matrix(self):
        kernel = sigproc.Kernel(1.0, normalize=False)
        kernel._evaluate = lambda t, _: sp.absolute(t)
        vectors = [sp.array([2.0, 1.0, 3.0]), sp.array([1.5, 4.0])]
        expected = sp.array([[8.0, 8.5], [8.5, 5.0]])
        assert_array_almost_equal(
            expected, kernel.summed_dist_matrix(vectors))

    def test_summed_dist_matrix_with_units(self):
        kernel = sigproc.Kernel(2000 * pq.ms, normalize=False)
        kernel._evaluate = \
            lambda t, size: 1.0 / size / size * sp.absolute(t)
        vectors = [sp.array([2.0, 1.0, 3.0]) * pq.s,
                   sp.array([1500, 4000]) * pq.ms]
        expected = sp.array([[2.0, 2.125], [2.125, 1.25]]) / pq.s
        actual = kernel.summed_dist_matrix(vectors)
        assert_array_almost_equal(expected, actual.rescale(1.0 / pq.s))


class Test_discretize_kernel(ut.TestCase):
    def test_discretizes_requested_area(self):
        kernel = sigproc.Kernel(1.0, normalize=False)
        kernel.boundary_enclosing_at_least = MagicMock(
            name='boundary_enclosing_at_least')
        kernel.boundary_enclosing_at_least.return_value = 2.0
        kernel._evaluate = lambda x, _: sp.ones(len(x))
        kernel_area_fraction = 0.5
        actual = sigproc.discretize_kernel(
            kernel, 1.0, kernel_area_fraction)
        kernel.boundary_enclosing_at_least.assert_called_with(
            kernel_area_fraction)
        assert_array_equal(actual, sp.ones(5))

    def test_discretizes_requested_area_with_units(self):
        kernel = sigproc.Kernel(100.0 * pq.ms, normalize=False)
        kernel.boundary_enclosing_at_least = MagicMock(
            name='boundary_enclosing_at_least')
        kernel.boundary_enclosing_at_least.return_value = 100.0 * pq.ms
        kernel._evaluate = lambda x, _: sp.ones(len(x))
        kernel_area_fraction = 0.5
        actual = sigproc.discretize_kernel(
            kernel, 10.0 * pq.Hz, kernel_area_fraction)
        kernel.boundary_enclosing_at_least.assert_called_with(
            kernel_area_fraction)
        assert_array_equal(actual, sp.ones(3))

    def test_discretizes_requested_number_of_bins(self):
        kernel = sigproc.Kernel(1.0, normalize=False)
        kernel._evaluate = lambda x, _: sp.ones(len(x))
        num_bins = 23
        actual = sigproc.discretize_kernel(kernel, 1.0, num_bins=num_bins)
        assert_array_equal(actual, sp.ones(num_bins))

    def test_can_normalize_to_unit_area(self):
        kernel = sigproc.Kernel(1.0, normalize=False)
        kernel._evaluate = lambda x, _: sp.ones(len(x))
        sampling_rate = 2.0
        num_bins = 20
        expected = sp.ones(num_bins) / num_bins * sampling_rate
        actual = sigproc.discretize_kernel(
            kernel, sampling_rate, num_bins=num_bins, ensure_unit_area=True)
        assert_array_equal(actual, expected)


class TestCausalDecayingExpKernel(ut.TestCase):
    def setUp(self):
        self.kernel_size = 500 * pq.ms
        self.kernel = sigproc.CausalDecayingExpKernel(self.kernel_size)

    def test_evaluates_to_correct_values(self):
        t = sp.array([-0.1, 0, 0.6, 1]) * pq.s
        expected = sp.array([0.0, 2.0, 0.60238842, 0.27067057]) / pq.s
        actual = self.kernel(t)
        assert_array_almost_equal(expected, actual.rescale(expected.units))

    def test_boundary_enclosing_at_least_is_correct(self):
        actual = self.kernel.boundary_enclosing_at_least(0.99)
        self.assertAlmostEqual(actual.rescale(pq.s), 2.30258509 * pq.s)
class TestLaplacianKernel(ut.TestCase):
    def setUp(self):
        self.kernel_size = 500 * pq.ms
        self.kernel = sigproc.LaplacianKernel(self.kernel_size)

    def test_evaluates_to_correct_values(self):
        t = sp.array([-0.1, 0, 0.6, 1]) * pq.s
        expected = sp.array([0.81873075, 1.0, 0.30119421, 0.13533528]) /\
            pq.s
        actual = self.kernel(t)
        assert_array_almost_equal(expected, actual.rescale(expected.units))

    def test_boundary_enclosing_at_least_is_correct(self):
        actual = self.kernel.boundary_enclosing_at_least(0.99)
        self.assertAlmostEqual(actual.rescale(pq.s), 2.30258509 * pq.s)

    def test_summed_dist_matrix(self):
        kernel = sigproc.LaplacianKernel(1.0, normalize=False)
        vectors = [sp.array([2.0, 1.0, 3.0]), sp.array([1.5, 4.0])]
        expected = sp.array(
            [[4.7421883311589941, 1.9891932723496157],
             [1.9891932723496157, 2.1641699972477975]])
        actual = kernel.summed_dist_matrix(vectors)
        assert_array_almost_equal(expected, actual)

    def test_summed_dist_matrix_with_units(self):
        kernel = sigproc.LaplacianKernel(1000 * pq.ms, normalize=True)
        vectors = [sp.array([2.0, 1.0, 3.0]) * pq.s,
                   sp.array([1500, 4000]) * pq.ms]
        expected = sp.array(
            [[4.7421883311589941, 1.9891932723496157],
             [1.9891932723496157, 2.1641699972477975]]) / 2.0 / pq.s
        actual = kernel.summed_dist_matrix(vectors)
        assert_array_almost_equal(expected, actual.rescale(1.0 / pq.s))


class TestRectangularKernel(ut.TestCase):
    def setUp(self):
        self.kernel_size = 500 * pq.ms
        self.kernel = sigproc.RectangularKernel(self.kernel_size)

    def test_evaluates_to_correct_values(self):
        t = sp.array([-0.1, 0, 0.6, 1]) * pq.s
        expected = sp.array([1.0, 1.0, 0.0, 0.0]) / pq.s
        actual = self.kernel(t)
        assert_array_almost_equal(expected, actual.rescale(expected.units))

    def test_boundary_enclosing_at_least_is_correct(self):
        actual = self.kernel.boundary_enclosing_at_least(0.99)
        self.assertAlmostEqual(
            actual.rescale(self.kernel_size.units), self.kernel_size)


class TestTriangularKernel(ut.TestCase):
    def setUp(self):
        self.kernel_size = 500 * pq.ms
        self.kernel = sigproc.TriangularKernel(self.kernel_size)

    def test_evaluates_to_correct_values(self):
        t = sp.array([-0.7, -0.1, 0, 0.3, 0.6, 1]) * pq.s
        expected = 2 * sp.array([0.0, 0.8, 1.0, 0.4, 0.0, 0.0]) / pq.s
        actual = self.kernel(t)
        assert_array_almost_equal(expected, actual.rescale(expected.units))

    def test_boundary_enclosing_at_least_is_correct(self):
        actual = self.kernel.boundary_enclosing_at_least(0.99)
        self.assertAlmostEqual(
            actual.rescale(self.kernel_size.units), self.kernel_size)


class Test_smooth(ut.TestCase):
    def test_convolution_with_empty_binned_array_returns_array_of_zeros(self):
        binned = sp.zeros(10)
        result = sigproc.smooth(
            binned, sigproc.GaussianKernel(), 10 * pq.Hz)
        self.assertTrue(sp.all(result == 0.0))

    def test_length_of_returned_array_equals_length_of_binned(self):
        binned = sp.ones(10)
        result = sigproc.smooth(
            binned, sigproc.GaussianKernel(), 10 * pq.Hz)
        self.assertEqual(binned.size, result.size)

    def test_returns_smoothed_representation(self):
        binned = sp.array([0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0])
        kernel = sigproc.RectangularKernel(0.3 * pq.s)
        # Because of the low sampling rate the expected result is a bit
        # off from the analytical result.
        expected = sp.array(
            [0.0, 0.0, 0.0, 1.6666666, 1.6666666, 1.6666666,
             0.0, 1.6666666, 1.6666666, 1.6666666, 0.0, 0.0]) * pq.Hz
        actual = sigproc.smooth(binned, kernel, sampling_rate=4 * pq.Hz)
        assert_array_almost_equal(expected, actual)

    def test_mode_allows_full_convolution(self):
        binned = sp.ones(10)
        kernel = sigproc.RectangularKernel(0.6 * pq.s)
        actual = sigproc.smooth(
            binned, kernel, sampling_rate=2.0 * pq.Hz, mode='full')
        self.assertEqual(actual.size, 12)

    def test_mode_allows_valid_convolution(self):
        binned = sp.ones(10)
        kernel = sigproc.RectangularKernel(0.6 * pq.s)
        actual = sigproc.smooth(
            binned, kernel, sampling_rate=2.0 * pq.Hz, mode='valid')
        self.assertEqual(actual.size, 8)
class CancelException(Exception):
    """ Raised when a user cancels a progress process.

    It is used by :class:`ProgressIndicator` and its descendants.
    """
    pass


def ignores_cancel(function):
    """ Decorator for functions that should ignore a raised
    :class:`CancelException` and just return nothing in this case. """
    @functools.wraps(function)
    def inner(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except CancelException:
            return None
    return inner


class ProgressIndicator(object):
    """ Base class for classes indicating progress of a long operation.

    This class does not implement any of the methods and can be used
    as a dummy if no progress indication is needed.
    """

    def set_ticks(self, ticks):
        """ Set the required number of ticks before the operation is done.

        :param int ticks: The number of steps that the operation will take.
        """
        pass

    def begin(self, title=''):
        """ Signal that the operation starts.

        :param string title: The name of the whole operation.
        """
        pass

    def step(self, num_steps=1):
        """ Signal that one or more steps of the operation were completed.

        :param int num_steps: The number of steps that have been completed.
        """
        pass

    def set_status(self, new_status):
        """ Set status description.

        :param string new_status: A description of the current status.
        """
        pass

    def done(self):
        """ Signal that the operation is done. """
        pass
# At least up to quantities 0.10.1 `min` and `max` silently dropped their
# `axis` and `out` arguments. Replace both methods with wrappers that
# forward the arguments to the underlying magnitude array.

def _quantity_max(self, axis=None, out=None):
    """ `max` replacement forwarding `axis`/`out` to the magnitude. """
    return pq.Quantity(
        self.magnitude.max(axis, out),
        self.dimensionality, copy=False)
pq.Quantity.max = _quantity_max


def _quantity_min(self, axis=None, out=None):
    """ `min` replacement forwarding `axis`/`out` to the magnitude. """
    return pq.Quantity(
        self.magnitude.min(axis, out),
        self.dimensionality, copy=False)
pq.Quantity.min = _quantity_min


# Python quantities' astype() takes no additional parameters, which became
# a problem in linspace with numpy 1.11. This is a dirty, dirty hack that
# lets Quantity.astype accept (and ignore) any extra arguments so it works
# with numpy >= 1.11.
_original_astype = pq.Quantity.astype


def _Quantity_astype(self, dtype=None, *args, **kwargs):
    """ `astype` replacement discarding the extra numpy >= 1.11 args. """
    return _original_astype(self, dtype)
pq.Quantity.astype = _Quantity_astype
TestQuantityMin(ut.TestCase): def test_returns_global_min(self): a = sp.array([[1, 2], [3, 4]]) * pq.s self.assertEqual(1, a.min()) def test_returns_axiswise_min(self): a = sp.array([[1, 2], [3, 4]]) * pq.s axis = 1 expected = sp.array([1, 3]) * pq.s assert_array_equal(expected, a.min(axis=axis)) def test_returns_result_in_out_array(self): a = sp.array([[1, 2], [3, 4]]) * pq.s out = 0 * pq.s expected = 1 * pq.s a.min(out=out) self.assertEqual(expected, out) if __name__ == '__main__': ut.main() spykeutils-0.4.3/spykeutils/monkeypatch/__init__.py0000644000175000017500000000000012664623646020734 0ustar robrobspykeutils-0.4.3/spykeutils/spike_train_metrics.py0000644000175000017500000011227712664623646020735 0ustar robrobfrom monkeypatch import quantities_patch import quantities as pq import scipy as sp import _scipy_quantities as spq import signal_processing as sigproc import tools try: import pymuvr PYMUVR_AVAILABLE = True except ImportError: PYMUVR_AVAILABLE = False assert quantities_patch # Suppress pyflakes warning, patch applied by loading def _calc_multiunit_dist_matrix_from_single_trials(units, dist_func, **params): if len(units) <= 0: return sp.zeros((0, 0)) num_trials = len(units.itervalues().next()) if not all((len(v) == num_trials for v in units.itervalues())): raise ValueError("Number of trials differs among units.") D = sp.empty((num_trials, num_trials)) for i in xrange(num_trials): D[i, i] = 0.0 a = [units[k][i] for k in units.iterkeys()] for j in xrange(i + 1, num_trials): b = [units[k][j] for k in units.iterkeys()] D[i, j] = D[j, i] = dist_func(a, b, **params) return D def _create_matrix_from_indexed_function( shape, func, symmetric_2d=False, **func_params): mat = sp.empty(shape) if symmetric_2d: for i in xrange(shape[0]): for j in xrange(i, shape[1]): mat[i, j] = mat[j, i] = func(i, j, **func_params) else: for idx in sp.ndindex(*shape): mat[idx] = func(*idx, **func_params) return mat def _merge_trains_and_label_spikes(trains): labels = sp.concatenate( 
[sp.zeros(st.size, dtype=int) + i for i, st in enumerate(trains)]) trains = spq.concatenate([st.view(dtype=pq.Quantity) for st in trains]) sorted_indices = sp.argsort(trains) return trains[sorted_indices], labels[sorted_indices] def cs_dist( trains, smoothing_filter, sampling_rate, filter_area_fraction=sigproc.default_kernel_area_fraction): """ Calculates the Cauchy-Schwarz distance between two spike trains given a smoothing filter. Let :math:`v_a(t)` and :math:`v_b(t)` with :math:`t \\in \\mathcal{T}` be the spike trains convolved with some smoothing filter and :math:`V(a, b) = \\int_{\\mathcal{T}} v_a(t) v_b(t) dt`. Then, the Cauchy-Schwarz distance of the spike trains is defined as :math:`d_{CS}(a, b) = \\arccos \\frac{V(a, b)^2}{V(a, a) V(b, b)}`. The Cauchy-Schwarz distance is closely related to the Schreiber et al. similarity measure :math:`S_S` by :math:`d_{CS} = \\arccos S_S^2` This function numerically convolves the spike trains with the smoothing filter which can be quite slow and inaccurate. If the analytical result of the autocorrelation of the smoothing filter is known, one can use :func:`schreiber_similarity` for a more efficient and precise calculation. Further information can be found in *Paiva, A. R. C., Park, I., & Principe, J. (2010). Inner products for representation and learning in the spike train domain. Statistical Signal Processing for Neuroscience and Neurotechnology, Academic Press, New York.* :param sequence trains: Sequence of :class:`neo.core.SpikeTrain` objects of which the distance will be calculated pairwise. :param smoothing_filter: Smoothing filter to be convolved with the spike trains. :type smoothing_filter: :class:`.signal_processing.Kernel` :param sampling_rate: The sampling rate which will be used to bin the spike trains as inverse time scalar. :type sampling_rate: Quantity scalar :param float filter_area_fraction: A value between 0 and 1 which controls the interval over which the smoothing filter will be discretized. 
def event_synchronization(
        trains, tau=None,
        kernel=sigproc.RectangularKernel(1.0, normalize=False), sort=True):
    """ event_synchronization(trains, tau=None,
    kernel=signal_processing.RectangularKernel(1.0, normalize=False),
    sort=True)

    Calculates the event synchronization.

    Let :math:`d(x|y)` be the count of spikes in :math:`y` occurring shortly
    before an event in :math:`x` (within :math:`\\tau`), and :math:`n_x`,
    :math:`n_y` the spike counts. The event synchrony is
    :math:`Q_T = \\frac{d(x|y) + d(y|x)}{\\sqrt{n_x n_y}}`. When `tau` is
    `None`, the maximum lag is determined per spike pair as half the minimum
    of the four neighboring inter-spike intervals.

    See *Quiroga, R. Q., Kreuz, T., & Grassberger, P. (2002). Event
    synchronization: a simple and fast method to measure synchronicity and
    time delay patterns. Physical Review E, 66(4), 041904.*

    :param sequence trains: Sequence of :class:`neo.core.SpikeTrain` objects
        of which the event synchronization will be calculated pairwise.
    :param tau: The maximum time lag for two spikes to be considered
        coincident as time scalar; `None` selects the automatic per-pair lag.
    :type tau: Quantity scalar
    :param kernel: Kernel to use in the calculation.
    :type kernel: :class:`.signal_processing.Kernel`
    :param bool sort: Set to `False` if the spike trains are already sorted
        to save the sorting step.
    :returns: Matrix containing the event synchronization for all pairs of
        spike trains.
    :rtype: 2-D array
    """
    trains = [st.view(type=pq.Quantity) for st in trains]
    if sort:
        trains = [sp.sort(st) for st in trains]

    if tau is None:
        # Pad each ISI sequence with infinity so border spikes get a valid
        # neighboring interval, then take the smaller of the two ISIs
        # around each spike.
        pad = sp.array([sp.inf])
        isis = [spq.concatenate((pad * st.units, sp.diff(st), pad * st.units))
                for st in trains]
        auto_taus = [spq.minimum(t[:-1], t[1:]) for t in isis]

    def compute(i, j):
        if i == j:
            return 1.0
        if tau is None:
            tau_mat = spq.minimum(*spq.meshgrid(
                auto_taus[i], auto_taus[j])) / 2.0
        else:
            tau_mat = sp.tile(tau, (trains[j].size, trains[i].size))
        coincidence = sp.sum(kernel(
            (trains[i] - sp.atleast_2d(trains[j]).T) / tau_mat))
        normalization = 1.0 / sp.sqrt(trains[i].size * trains[j].size)
        return normalization * coincidence

    return _create_matrix_from_indexed_function(
        (len(trains), len(trains)), compute, kernel.is_symmetric())
def hunter_milton_similarity(trains, tau=1.0 * pq.s, kernel=None):
    """ Calculates the Hunter-Milton similarity measure.

    With kernel :math:`K(t)` and :math:`y_{k'}` the spike in train :math:`y`
    closest to spike :math:`x_k` of train :math:`x`, define
    :math:`d(x_k) = K(x_k - y_{k'})`. The measure is the mean of the
    per-train averages of :math:`d`. Returns 0 if exactly one of two spike
    trains is empty, 1 if both are empty.

    See *Hunter, J. D., & Milton, J. G. (2003)* and *Dauwels, J., Vialatte,
    F., Weber, T., & Cichocki, A. (2009). On similarity measures for spike
    trains.*

    :param sequence trains: Sequence of :class:`neo.core.SpikeTrain` objects
        of which the Hunter-Milton similarity will be calculated pairwise.
    :param tau: The time scale for determining the coincidence of two events
        as time scalar.
    :type tau: Quantity scalar
    :param kernel: Kernel to use. If `None`, an unnormalized Laplacian
        kernel of size `tau` will be used.
    :type kernel: :class:`.signal_processing.Kernel`
    :returns: Matrix containing the Hunter-Milton similarity for all pairs
        of spike trains.
    :rtype: 2-D array
    """
    if kernel is None:
        kernel = sigproc.LaplacianKernel(tau, normalize=False)

    def compute(i, j):
        if i == j:
            return 1.0
        if trains[i].size <= 0 or trains[j].size <= 0:
            return 0.0
        # All pairwise absolute spike time differences; row/column minima
        # pick each spike's nearest partner in the other train.
        diffs = sp.absolute(trains[i] - sp.atleast_2d(trains[j]).T)
        mean_i = sp.sum(kernel(sp.amin(diffs, axis=0))) / trains[i].size
        mean_j = sp.sum(kernel(sp.amin(diffs, axis=1))) / trains[j].size
        return 0.5 * (mean_i + mean_j)

    return _create_matrix_from_indexed_function(
        (len(trains), len(trains)), compute, kernel.is_symmetric())
def norm_dist(
        trains, smoothing_filter, sampling_rate,
        filter_area_fraction=sigproc.default_kernel_area_fraction):
    """ Calculates the norm distance between spike trains given a smoothing
    filter.

    With :math:`v_a(t)` and :math:`v_b(t)` the convolved spike trains, the
    distance is :math:`d_{ND}(a, b) =
    \\sqrt{\\int (v_a(t) - v_b(t))^2 dt}`.

    See *Paiva, A. R. C., Park, I., & Principe, J. (2010). Inner products
    for representation and learning in the spike train domain.*

    :param sequence trains: Sequence of :class:`neo.core.SpikeTrain` objects
        of which the distance will be calculated pairwise.
    :param smoothing_filter: Smoothing filter to be convolved with the spike
        trains.
    :type smoothing_filter: :class:`.signal_processing.Kernel`
    :param sampling_rate: The sampling rate which will be used to bin the
        spike trains as inverse time scalar.
    :type sampling_rate: Quantity scalar
    :param float filter_area_fraction: A value between 0 and 1 which controls
        the interval over which the smoothing filter will be discretized. At
        least the given fraction of the complete smoothing filter area will
        be covered.
    :returns: Matrix containing the norm distance of all pairs of spike
        trains given the smoothing_filter.
    :rtype: Quantity 2D with units depending on the smoothing filter
        (usually temporal frequency units)
    """
    gram = st_inner(
        trains, trains, smoothing_filter, sampling_rate,
        filter_area_fraction)
    energies = spq.diag(gram)
    # ||a - b||^2 = <a,a> + <b,b> - 2<a,b>; clamp at zero against numerical
    # noise before taking the root.
    squared = energies + sp.atleast_2d(energies).T - 2 * gram
    return spq.maximum(0.0 * pq.Hz, squared) ** 0.5
def schreiber_similarity(trains, kernel, sort=True):
    """ Calculates the Schreiber et al. similarity measure between spike
    trains given a kernel.

    With :math:`V(a, b) = \\int v_a(t) v_b(t) dt` for the trains convolved
    with a smoothing filter, the measure is :math:`S_{S}(a, b) =
    \\frac{V(a, b)}{\\sqrt{V(a, a) V(b, b)}}`. It relates to the
    Cauchy-Schwarz distance by :math:`S_S = \\sqrt{\\cos d_{CS}}`.

    Unlike :func:`cs_dist`, this uses the kernel resulting from the
    smoothing filter's autocorrelation directly, which is faster and more
    accurate than numerical convolution.

    See *Dauwels et al. (2009)* and *Paiva et al. (2009). A comparison of
    binless spike train measures.*

    :param sequence trains: Sequence of :class:`neo.core.SpikeTrain` objects
        of which the similarity will be calculated pairwise.
    :param kernel: Kernel to use. It corresponds to a smoothing filter
        by being the autocorrelation of such a filter.
    :type kernel: :class:`.signal_processing.Kernel`
    :param bool sort: Set to `False` if the spike trains are already sorted
        to save the sorting step.
    :returns: Matrix containing the Schreiber et al. similarity measure of
        all pairs of spike trains.
    :rtype: 2-D array
    """
    gram = kernel.summed_dist_matrix(trains, not sort)

    def compute(i, j):
        return sp.sqrt(
            gram[i, j] * gram[j, i] / gram[i, i] / gram[j, j])

    return _create_matrix_from_indexed_function(
        (len(trains), len(trains)), compute, kernel.is_symmetric())
def st_inner(
        a, b, smoothing_filter, sampling_rate,
        filter_area_fraction=sigproc.default_kernel_area_fraction):
    """ Calculates the inner product of spike trains given a smoothing
    filter.

    With :math:`v_a(t)` and :math:`v_b(t)` the convolved spike trains, the
    inner product is :math:`\\int v_a(t) v_b(t) dt`.

    See *Paiva, A. R. C., Park, I., & Principe, J. (2010). Inner products
    for representation and learning in the spike train domain.*

    :param sequence a: Sequence of :class:`neo.core.SpikeTrain` objects.
    :param sequence b: Sequence of :class:`neo.core.SpikeTrain` objects.
    :param smoothing_filter: A smoothing filter to be convolved with the
        spike trains.
    :type smoothing_filter: :class:`.signal_processing.Kernel`
    :param sampling_rate: The sampling rate which will be used to bin the
        spike train as inverse time scalar.
    :type sampling_rate: Quantity scalar
    :param float filter_area_fraction: A value between 0 and 1 which controls
        the interval over which the `smoothing_filter` will be discretized.
        At least the given fraction of the complete `smoothing_filter` area
        will be covered.
    :returns: Matrix containing the inner product for each pair of spike
        trains with one spike train from `a` and the other one from `b`.
    :rtype: Quantity 2D with units depending on the smoothing filter
        (usually temporal frequency units)
    """
    if all(x is y for x, y in zip(a, b)):
        # a and b are the same trains; convolve once and duplicate the list
        # so the indexing below stays uniform.
        convolved, sampling_rate = _prepare_for_inner_prod(
            a, smoothing_filter, sampling_rate, filter_area_fraction)
        convolved = convolved + convolved
    else:
        convolved, sampling_rate = _prepare_for_inner_prod(
            a + b, smoothing_filter, sampling_rate, filter_area_fraction)
    return (
        sp.inner(convolved[:len(a)], convolved[len(a):]) *
        convolved[0].units * convolved[1].units / sampling_rate)
def _prepare_for_inner_prod(
        trains, smoothing_filter, sampling_rate, filter_area_fraction):
    """ Convolve all `trains` with `smoothing_filter` on a common, padded
    time interval and return the list of convolved signals together with
    the sampling rate. """
    t_start, t_stop = tools.maximum_spike_train_interval({0: trains})
    # Pad on both sides so the filter's support fits fully into the binning
    # window at the interval borders.
    padding = smoothing_filter.boundary_enclosing_at_least(
        filter_area_fraction)
    t_start -= 2 * padding
    t_stop += 2 * padding

    convolved = []
    for st in trains:
        signal = sigproc.st_convolve(
            st, smoothing_filter, sampling_rate, mode='full',
            binning_params={'t_start': t_start, 't_stop': t_stop},
            kernel_discretization_params={
                'area_fraction': filter_area_fraction})[0]
        convolved.append(signal)
    return convolved, sampling_rate


def st_norm(
        train, smoothing_filter, sampling_rate,
        filter_area_fraction=sigproc.default_kernel_area_fraction):
    """ Calculates the spike train norm given a smoothing filter.

    With :math:`v(t)` the convolved spike train, the norm is
    :math:`\\int v(t)^2 dt`.

    See *Paiva, A. R. C., Park, I., & Principe, J. (2010). Inner products
    for representation and learning in the spike train domain.*

    :param train: Spike train of which to calculate the norm.
    :type train: :class:`neo.core.SpikeTrain`
    :param smoothing_filter: Smoothing filter to be convolved with the spike
        train.
    :type smoothing_filter: :class:`.signal_processing.Kernel`
    :param sampling_rate: The sampling rate which will be used to bin the
        spike train as inverse time scalar.
    :type sampling_rate: Quantity scalar
    :param float filter_area_fraction: A value between 0 and 1 which controls
        the interval over which the smoothing filter will be discretized. At
        least the given fraction of the complete smoothing filter area will
        be covered.
    :returns: The norm of the spike train given the smoothing_filter.
    :rtype: Quantity scalar with units depending on the smoothing filter
        (usually temporal frequency units)
    """
    inner = st_inner(
        [train], [train], smoothing_filter, sampling_rate,
        filter_area_fraction)
    return inner ** 0.5
def van_rossum_dist(trains, tau=1.0 * pq.s, kernel=None, sort=True):
    """ Calculates the van Rossum distance.

    Defined as the Euclidean distance of the spike trains convolved with a
    causal decaying exponential smoothing filter. See *Rossum, M. C. W.
    (2001). A novel spike distance. Neural Computation, 13(4), 751-763.*

    This implementation is normalized to yield a distance of 1.0 for the
    distance between an empty spike train and a spike train with a single
    spike. Divide the result by sqrt(2.0) to get the normalization used in
    the cited paper.

    Given :math:`N` spike trains with :math:`n` spikes on average the
    run-time complexity is :math:`O(N^2 n^2)`.

    :param sequence trains: Sequence of :class:`neo.core.SpikeTrain` objects
        of which the van Rossum distance will be calculated pairwise.
    :param tau: Decay rate of the exponential function as time scalar.
        Ignored if `kernel` is not `None`. May be :const:`scipy.inf`, which
        reduces the metric to a pure spike count difference.
    :type tau: Quantity scalar
    :param kernel: Kernel to use (the autocorrelation of the smoothing
        filter, not the filter itself). If `None`, an unnormalized Laplacian
        kernel of size `tau` will be used.
    :type kernel: :class:`.signal_processing.Kernel`
    :param bool sort: Set to `False` if the spike trains are already sorted
        to save the sorting step.
    :returns: Matrix containing the van Rossum distances for all pairs of
        spike trains.
    :rtype: 2-D array
    """
    if kernel is None:
        if tau == sp.inf:
            # Infinite time constant: only spike counts matter.
            spike_counts = [st.size for st in trains]
            return (spike_counts - sp.atleast_2d(spike_counts).T) ** 2
        kernel = sigproc.LaplacianKernel(tau, normalize=False)

    gram = kernel.summed_dist_matrix(
        [st.view(type=pq.Quantity) for st in trains], not sort)
    squared = sp.empty_like(gram)
    for i, j in sp.ndindex(*gram.shape):
        # ||a - b||^2 expressed via the summed kernel matrix.
        squared[i, j] = (
            gram[i, i] + gram[j, j] - gram[i, j] - gram[j, i])
    return sp.sqrt(squared)
def van_rossum_multiunit_dist(units, weighting, tau=1.0 * pq.s, kernel=None):
    """ Calculates the van Rossum multi-unit distance.

    The single-unit distance is defined as Euclidean distance of the spike
    trains convolved with a causal decaying exponential smoothing filter
    (see *Rossum, M. C. W. (2001). A novel spike distance. Neural
    Computation, 13(4), 751-763.*). This implementation is normalized to
    yield a distance of 1.0 between an empty spike train and a spike train
    with a single spike; divide by sqrt(2.0) for the paper's normalization.

    With :math:`R_{pq}` the squared single-unit distance between the
    :math:`p`-th train of `a` and the :math:`q`-th of `b`, the multi-unit
    distance is :math:`\\sqrt{\\sum_p (R_{pp} + c \\cdot \\sum_{q \\neq p}
    R_{pq})}` with :math:`c` equal to `weighting`, interpolating between a
    labeled-line and a summed population coding. See *Houghton, C., &
    Kreuz, T. (2012). On the efficient calculation of van Rossum
    distances.*

    Given :math:`N` spike trains in total with :math:`n` spikes on average
    the run-time complexity is :math:`O(N^2 n^2)` with :math:`O(N^2 + Nn^2)`
    memory. If `pymuvr` is installed, the faster C++ implementation in that
    package will be used.

    :param dict units: Dictionary of sequences with each sequence containing
        the trials of one unit. Each trial should be a
        :class:`neo.core.SpikeTrain` and all units should have the same
        number of trials.
    :param float weighting: Controls the interpolation between a labeled
        line and a summed population coding.
    :param tau: Decay rate of the exponential function as time scalar.
        Ignored if `kernel` is not `None`. May be :const:`scipy.inf`, which
        reduces the metric to a pure spike count difference.
    :type tau: Quantity scalar
    :param kernel: Kernel to use (the autocorrelation of the smoothing
        filter, not the filter itself). If `None`, an unnormalized Laplacian
        kernel of size `tau` will be used.
    :type kernel: :class:`.signal_processing.Kernel`
    :returns: A 2D array with the multi-unit distance for each pair of
        trials.
    :rtype: 2D array
    """
    if kernel is None and tau != sp.inf:
        kernel = sigproc.LaplacianKernel(tau, normalize=False)

    # NOTE(review): the pymuvr shortcut always uses the exponential kernel
    # parameterized by tau; a custom `kernel` argument is silently ignored
    # on this path — confirm this is intended.
    if PYMUVR_AVAILABLE and tau != sp.inf:
        # Reorganize the data as nested plain-Python lists of spike times in
        # seconds, as expected by pymuvr: trials -> units -> spikes.
        rescaled_trains = []
        n_trials = len(units.itervalues().next())
        for i in xrange(n_trials):
            trial_trains = []
            for tr in units.itervalues():
                trial_trains.append(list(tr[i].rescale(pq.s).magnitude))
            rescaled_trains.append(trial_trains)
        t = tau.rescale(pq.s).magnitude
        # Bug fix: the previous version left debug `print` statements here
        # and redundantly recomputed the whole distance matrix with the
        # pure-Python fallback just to print it, doubling the runtime.
        return pymuvr.square_distance_matrix(rescaled_trains, weighting, t)

    # Pure-Python fallback.
    return _calc_multiunit_dist_matrix_from_single_trials(
        units, _van_rossum_multiunit_dist_for_trial_pair,
        weighting=weighting, tau=tau, kernel=kernel)


def _van_rossum_multiunit_dist_for_trial_pair(a, b, weighting, tau, kernel):
    """ Multi-unit van Rossum distance for a single pair of trials.

    :param sequence a: Spike trains of the first trial, one per unit.
    :param sequence b: Spike trains of the second trial, same unit order.
    :param float weighting: Interpolation factor between labeled-line and
        summed population coding.
    :param tau: Unused here; the kernel already encodes the time scale.
    :param kernel: Kernel (autocorrelation of the smoothing filter) or
        `None` for the pure spike-count limit (tau == inf).
    :returns: Scalar distance.
    """
    if kernel is None:
        # tau == inf limit: the kernel matrix degenerates to spike-count
        # products.
        spike_counts = sp.atleast_2d([st.size for st in a + b])
        k_dist = spike_counts.T * (spike_counts - spike_counts.T)
    else:
        k_dist = kernel.summed_dist_matrix(a + b)

    non_diagonal = sp.logical_not(sp.eye(len(a)))
    # Same-unit (diagonal block) contributions.
    summed_population = (
        sp.trace(k_dist) - sp.trace(k_dist, len(a)) -
        sp.trace(k_dist, -len(a)))
    # Cross-unit contributions, scaled by the weighting below.
    labeled_line = (
        sp.sum(k_dist[:len(a), :len(a)][non_diagonal]) +
        sp.sum(k_dist[len(a):, len(a):][non_diagonal]) -
        sp.sum(k_dist[:len(a), len(a):][non_diagonal]) -
        sp.sum(k_dist[len(a):, :len(a)][non_diagonal]))
    return sp.sqrt(summed_population + weighting * labeled_line)


def victor_purpura_dist(trains, q=1.0 * pq.Hz, kernel=None, sort=True):
    """ Calculates the Victor-Purpura's (VP) distance. It is often denoted
    as :math:`D^{\\text{spike}}[q]`.

    Defined as the minimal cost of transforming spike train `a` into spike
    train `b` by inserting or deleting spikes (cost 1.0) and shifting a
    spike from :math:`t` to :math:`t'` (cost :math:`q \\cdot |t - t'|`).
    See *Victor, J. D., & Purpura, K. P. (1996). Nature and precision of
    temporal coding in visual cortex: a metric-space analysis. Journal of
    Neurophysiology.*

    Given the average number of spikes :math:`n` in a spike train and
    :math:`N` spike trains the run-time complexity is :math:`O(N^2 n^2)`
    with :math:`O(N^2 + n^2)` memory.

    :param sequence trains: Sequence of :class:`neo.core.SpikeTrain` objects
        of which the distance will be calculated pairwise.
    :param q: Cost factor for spike shifts as inverse time scalar. Ignored
        if `kernel` is not `None`.
    :type q: Quantity scalar
    :param kernel: Kernel to use. If `None`, an unnormalized triangular
        kernel with a half width of `2.0/q` will be used.
    :type kernel: :class:`.signal_processing.Kernel`
    :param bool sort: Set to `False` if the spike trains are already sorted
        to save the sorting step.
    :returns: Matrix containing the VP distance of all pairs of spike
        trains.
    :rtype: 2-D array
    """
    if kernel is None:
        if q == 0.0:
            # Zero shift cost: only spike counts matter.
            num_spikes = sp.atleast_2d([st.size for st in trains])
            return sp.absolute(num_spikes.T - num_spikes)
        kernel = sigproc.TriangularKernel(2.0 / q, normalize=False)

    if sort:
        trains = [sp.sort(st.view(type=pq.Quantity)) for st in trains]

    def compute(i, j):
        if i == j:
            return 0.0
        return _victor_purpura_dist_for_trial_pair(
            trains[i], trains[j], kernel)

    return _create_matrix_from_indexed_function(
        (len(trains), len(trains)), compute, kernel.is_symmetric())
def _victor_purpura_dist_for_trial_pair(a, b, kernel):
    """ Victor-Purpura distance between two single spike trains.

    :param a: First spike train (sorted spike times).
    :param b: Second spike train (sorted spike times).
    :param kernel: Kernel encoding the shift cost.
    :returns: Scalar VP distance.
    """
    # Empty-train cases: cost equals deleting/inserting every spike.
    if a.size <= 0 or b.size <= 0:
        return max(a.size, b.size)

    # Ensure a is the longer train; the inner loop runs over the shorter one.
    if a.size < b.size:
        a, b = b, a

    # The algorithm used is based on the one given in
    #
    # Victor, J. D., & Purpura, K. P. (1996). Nature and precision of
    # temporal coding in visual cortex: a metric-space analysis. Journal of
    # Neurophysiology.
    #
    # It constructs a matrix G[i, j] containing the minimal cost when only
    # considering the first i and j spikes of the spike trains. However, one
    # never needs to store more than one row and one column at the same time
    # for calculating the VP distance.
    # cost[0, :cost.shape[1] - i] corresponds to G[i:, i]. In the same way
    # cost[1, :cost.shape[1] - i] corresponds to G[i, i:].
    #
    # Moreover, the minimum operation on the costs of the three kind of
    # actions (delete, insert or move spike) can be split up in two
    # operations. One operation depends only on the already calculated costs
    # and kernel evaluation (insertion of spike vs moving a spike). The
    # other minimum depends on that result and the cost of deleting a spike.
    # This operation always depends on the last calculated element in the
    # cost array and corresponds to a recursive application of
    # f(accumulated_min[i]) = min(f(accumulated_min[i-1]), accumulated_min[i])
    # + 1. That '+1' can be excluded from this function if the summed value
    # for all recursive applications is added upfront to accumulated_min.
    # Afterwards it has to be removed again except one for the currently
    # processed spike to get the real costs up to the evaluation of i.
    #
    # All currently calculated costs will be considered -1 because this
    # saves a number of additions as in most cases the cost would be
    # increased by exactly one (the only exception is shifting, but in that
    # calculation is already the addition of a constant involved, thus
    # leaving the number of operations the same). The increase by one will
    # be added after calculating all minima by shifting decreasing_sequence
    # by one when removing it from accumulated_min.

    min_dim, max_dim = b.size, a.size + 1
    # Row 0 / row 1 track the current column / row of G; Fortran order for
    # efficient column-wise access.
    cost = sp.asfortranarray(sp.tile(sp.arange(float(max_dim)), (2, 1)))
    decreasing_sequence = sp.asfortranarray(cost[:, ::-1])
    # Shift costs for all spike pairs; `1 - 2 * kernel(...)` folds the
    # insert-vs-shift comparison into a single addition below.
    k = 1 - 2 * sp.asfortranarray(kernel(
        (sp.atleast_2d(a).T - b).view(type=pq.Quantity)).simplified)

    for i in xrange(min_dim):
        # determine G[i, i] == accumulated_min[:, 0]
        accumulated_min = cost[:, :-i - 1] + k[i:, i]
        accumulated_min[1, :b.size - i] = \
            cost[1, :b.size - i] + k[i, i:]
        accumulated_min = sp.minimum(
            accumulated_min,  # shift
            cost[:, 1:max_dim - i])  # insert
        acc_dim = accumulated_min.shape[1]
        # delete vs min(insert, shift)
        accumulated_min[:, 0] = min(cost[1, 1], accumulated_min[0, 0])
        # determine G[i, :] and G[:, i] by propagating minima.
        accumulated_min += decreasing_sequence[:, -acc_dim - 1:-1]
        accumulated_min = sp.minimum.accumulate(accumulated_min, axis=1)
        cost[:, :acc_dim] = accumulated_min - decreasing_sequence[:, -acc_dim:]
    return cost[0, -min_dim - 1]
def victor_purpura_multiunit_dist(
        units, reassignment_cost, q=1.0 * pq.Hz, kernel=None):
    """ Calculates the Victor-Purpura's (VP) multi-unit distance.

    It is defined as the minimal cost of transforming the spike trains `a`
    into spike trains `b` by using the following operations:

        * Inserting or deleting a spike (cost 1.0).
        * Shifting a spike from :math:`t` to :math:`t'` (cost
          :math:`q \\cdot |t - t'|`).
        * Moving a spike to another spike train (cost
          `reassignment_cost`).

    A detailed description can be found in *Aronov, D. (2003). Fast
    algorithm for the metric-space analysis of simultaneous responses of
    multiple single neurons. Journal of Neuroscience Methods.*

    Given the average number of spikes :math:`N` in a spike train and
    :math:`L` units with :math:`n` spike trains each the run-time complexity
    is :math:`O(n^2 LN^{L+1})`. The space complexity is
    :math:`O(n^2 + LN^{L+1})`.

    For calculating the distance between only two units one should use
    :func:`victor_purpura_dist` which is more efficient.

    :param dict units: Dictionary of sequences with each sequence containing
        the trials of one unit. Each trial should be a
        :class:`neo.core.SpikeTrain` and all units should have the same
        number of trials.
    :param float reassignment_cost: Cost to reassign a spike from one train
        to another (sometimes denoted with :math:`k`). Should be between 0
        and 2: at 0 spikes can be reassigned without cost, at 2 and above it
        is cheaper to delete and reinsert a spike.
    :param q: Cost factor for spike shifts as inverse time scalar. Ignored
        if `kernel` is not `None`.
    :type q: Quantity scalar
    :param kernel: Kernel to use. If `None`, an unnormalized triangular
        kernel with a half width of `2.0/q` will be used.
    :type kernel: :class:`.signal_processing.Kernel`
    :returns: A 2D array with the multi-unit distance for each pair of
        trials.
    :rtype: 2D array
    """
    if kernel is None:
        kernel = sigproc.TriangularKernel(2.0 / q, normalize=False)
    return _calc_multiunit_dist_matrix_from_single_trials(
        units, _victor_purpura_multiunit_dist_for_trial_pair,
        reassignment_cost=reassignment_cost, kernel=kernel)
def _victor_purpura_multiunit_dist_for_trial_pair(
        a, b, reassignment_cost, kernel):
    """ Multi-unit VP distance for a single pair of trials.

    :param sequence a: Spike trains of the first trial, one per unit.
    :param sequence b: Spike trains of the second trial, same unit order.
    :param float reassignment_cost: Cost to move a spike between trains.
    :param kernel: Kernel encoding the shift cost.
    :returns: Scalar distance.
    """
    # The algorithm used is based on the one given in
    #
    # Victor, J. D., & Purpura, K. P. (1996). Nature and precision of
    # temporal coding in visual cortex: a metric-space analysis. Journal of
    # Neurophysiology.
    #
    # It constructs a matrix cost[i, j_1, ... j_L] containing the minimal
    # cost when only considering the first i spikes of the merged spikes of
    # a and j_w spikes of the spike trains of b (the reference given above
    # denotes this matrix with G). In this implementation only the one
    # submatrix for one specific i is stored as in each step only i-1 and i
    # will be accessed. That saves some memory.

    # Initialization of various variables needed by the algorithm. Also swap
    # a and b if it will save time as the algorithm is not symmetric.
    a_num_spikes = [st.size for st in a]
    b_num_spikes = [st.size for st in b]
    a_num_total_spikes = sp.sum(a_num_spikes)

    complexity_same = a_num_total_spikes * sp.prod(b_num_spikes)
    complexity_swapped = sp.prod(a_num_spikes) * sp.sum(b_num_spikes)
    if complexity_swapped < complexity_same:
        a, b = b, a
        a_num_spikes, b_num_spikes = b_num_spikes, a_num_spikes
        a_num_total_spikes = sp.sum(a_num_spikes)

    # No spikes in a: every spike of b has to be deleted.
    if a_num_total_spikes <= 0:
        return sp.sum(b_num_spikes)

    b_dims = tuple(sp.asarray(b_num_spikes) + 1)

    # Initial cost: deleting all considered spikes of b.
    cost = sp.asfarray(sp.sum(sp.indices(b_dims), axis=0))

    a_merged = _merge_trains_and_label_spikes(a)
    b_strides = sp.cumprod((b_dims + (1,))[::-1])[:-1]
    flat_b_indices = sp.arange(cost.size)
    b_indices = sp.vstack(sp.unravel_index(flat_b_indices, b_dims))
    # Flat index of each cell's predecessor along every b dimension.
    flat_neighbor_indices = sp.maximum(
        0, sp.atleast_2d(flat_b_indices).T - b_strides[::-1])
    invalid_neighbors = b_indices.T == 0

    # All spikes of b in one padded matrix (rows = units), NaN-padded so the
    # kernel evaluation over missing entries yields no spurious minima.
    b_train_mat = sp.empty((len(b), sp.amax(b_num_spikes))) * b[0].units
    for i, st in enumerate(b):
        b_train_mat[i, :st.size] = st.rescale(b[0].units)
        b_train_mat[i, st.size:] = sp.nan * b[0].units

    # Reassignment cost applies unless source and target unit coincide.
    reassignment_costs = sp.empty((a_merged[0].size,) + b_train_mat.shape)
    reassignment_costs.fill(reassignment_cost)
    reassignment_costs[sp.arange(a_merged[1].size), a_merged[1], :] = 0.0
    k = 1 - 2 * kernel(sp.atleast_2d(
        a_merged[0]).T - b_train_mat.flatten()).simplified.reshape(
            (a_merged[0].size,) + b_train_mat.shape) + reassignment_costs

    decreasing_sequence = flat_b_indices[::-1]

    # Do the actual calculations.
    for a_idx in xrange(1, a_num_total_spikes + 1):
        base_costs = cost.flat[flat_neighbor_indices]
        base_costs[invalid_neighbors] = sp.inf
        min_base_cost_labels = sp.argmin(base_costs, axis=1)
        cost_all_possible_shifts = k[a_idx - 1, min_base_cost_labels, :] + \
            sp.atleast_2d(base_costs[flat_b_indices, min_base_cost_labels]).T
        cost_shift = cost_all_possible_shifts[
            sp.arange(cost_all_possible_shifts.shape[0]),
            b_indices[min_base_cost_labels, flat_b_indices] - 1]

        cost_delete_in_a = cost.flat[flat_b_indices]

        # cost_shift is dimensionless, but there is a bug in quantities with
        # the minimum function:
        # The explicit request for the magnitude circumvents this problem.
        cost.flat = sp.minimum(cost_delete_in_a, cost_shift.magnitude) + 1
        cost.flat[0] = sp.inf

        # Minimum with cost for deleting in b
        # The calculation order is somewhat different from the order one
        # would expect from the naive algorithm. This implementation,
        # however, optimizes the use of the CPU cache giving a considerable
        # speed improvement.
        # Basically this codes calculates the values of a row of elements
        # for each dimension of cost.
        for dim_size, stride in zip(b_dims[::-1], b_strides):
            for i in xrange(stride):
                segment_size = dim_size * stride
                for j in xrange(i, cost.size, segment_size):
                    s = sp.s_[j:j + segment_size:stride]
                    seq = decreasing_sequence[-cost.flat[s].size:]
                    cost.flat[s] = sp.minimum.accumulate(
                        cost.flat[s] + seq) - seq

    return cost.flat[-1]
def psth(
        trains, bin_size, rate_correction=True, start=0 * pq.ms,
        stop=sp.inf * pq.s):
    """ Return dictionary of peri stimulus time histograms for a dictionary
    of spike train lists.

    :param dict trains: A dictionary of lists of
        :class:`neo.core.SpikeTrain` objects.
    :param bin_size: The desired bin size (as a time quantity).
    :type bin_size: Quantity scalar
    :param bool rate_correction: Determines if rates (``True``) or
        counts (``False``) are returned.
    :param start: The desired time for the start of the first bin. It will
        be recalculated if there are spike trains which start later than
        this time.
    :type start: Quantity scalar
    :param stop: The desired time for the end of the last bin. It will be
        recalculated if there are spike trains which end earlier than this
        time.
    :type stop: Quantity scalar
    :returns: A dictionary (with the same indices as ``trains``) of arrays
        containing counts (or rates if ``rate_correction`` is ``True``)
        and the bin borders.
    :rtype: dict, Quantity 1D
    """
    if not trains:
        raise SpykeException('No spike trains for PSTH!')

    # Clip the requested window to the interval covered by all trains.
    start, stop = tools.minimum_spike_train_interval(trains, start, stop)
    binned, bins = tools.bin_spike_trains(trains, 1.0 / bin_size, start, stop)

    histograms = {}
    # Conversion factor from per-bin counts to Hz.
    per_second = 1.0 / float(bin_size.rescale(pq.s))
    for key, counts in binned.iteritems():
        if not counts:
            histograms[key] = sp.array([])
            continue
        stacked = sp.array(counts)
        if rate_correction:
            histograms[key] = sp.mean(stacked, 0)
        else:
            histograms[key] = sp.sum(stacked, 0)
        # NOTE(review): the time scaling is applied in both branches here,
        # so the rate_correction=False result is scaled by 1/bin_size as
        # well — confirm against the original file's indentation.
        histograms[key] *= per_second
    return histograms, bins
""" ret = [] for t in trains: s = t.segment if s not in events: if not copy: raise ValueError( 'Cannot align spike trains: At least one segment does' + 'not have an align event.') continue e = events[s] if copy: st = neo.SpikeTrain( t, t.t_stop, units=t.units, sampling_rate=t.sampling_rate, t_start=t.t_start, waveforms=t.waveforms, left_sweep=t.left_sweep, name=t.name, file_origin=t.file_origin, description=t.description, **t.annotations) else: st = t st -= e.time st.t_stop -= e.time st.t_start -= e.time ret.append(st) return ret def spike_density_estimation(trains, start=0 * pq.ms, stop=None, kernel=None, kernel_size=100 * pq.ms, optimize_steps=None, progress=None): """ Create a spike density estimation from a dictionary of lists of spike trains. The spike density estimations give an estimate of the instantaneous rate. The density estimation is evaluated at 1024 equally spaced points covering the range of the input spike trains. Optionally finds optimal kernel size for given data using the algorithm from (Shimazaki, Shinomoto. Journal of Computational Neuroscience. 2010). :param dict trains: A dictionary of :class:`neo.core.SpikeTrain` lists. :param start: The desired time for the start of the estimation. It will be recalculated if there are spike trains which start later than this time. This parameter can be negative (which could be useful when aligning on events). :type start: Quantity scalar :param stop: The desired time for the end of the estimation. It will be recalculated if there are spike trains which end earlier than this time. :type stop: Quantity scalar :param kernel: The kernel function or instance to use, should accept two parameters: A ndarray of distances and a kernel size. The total area under the kernel function should be 1. Automatic optimization assumes a Gaussian kernel and will likely not produce optimal results for different kernels. 
Default: Gaussian kernel :type kernel: func or :class:`.signal_processing.Kernel` :param kernel_size: A uniform kernel size for all spike trains. Only used if optimization of kernel sizes is not used. :type kernel_size: Quantity scalar :param optimize_steps: An array of time lengths that will be considered in the kernel width optimization. Note that the optimization assumes a Gaussian kernel and will most likely not give the optimal kernel size if another kernel is used. If None, ``kernel_size`` will be used. :type optimize_steps: Quantity 1D :param progress: Set this parameter to report progress. :type progress: :class:`.progress_indicator.ProgressIndicator` :returns: Three values: * A dictionary of the spike density estimations (Quantity 1D in Hz). Indexed the same as ``trains``. * A dictionary of kernel sizes (Quantity scalars). Indexed the same as ``trains``. * The used evaluation points. :rtype: dict, dict, Quantity 1D """ if not progress: progress = ProgressIndicator() if optimize_steps is None or len(optimize_steps) < 1: units = kernel_size.units else: units = optimize_steps.units if kernel is None: kernel = sigproc.GaussianKernel(100 * pq.ms) # Prepare evaluation points max_start, max_stop = tools.minimum_spike_train_interval(trains) start = max(start, max_start) start.units = units if stop is not None: stop = min(stop, max_stop) else: stop = max_stop stop.units = units bins = sp.linspace(start, stop, 1025) eval_points = bins[:-1] + (bins[1] - bins[0]) / 2 if optimize_steps is None or len(optimize_steps) < 1: kernel_size = {u: kernel_size for u in trains} else: # Find optimal kernel size for all spike train sets progress.set_ticks(len(optimize_steps) * len(trains)) progress.set_status('Calculating optimal kernel size') kernel_size = {} for u, t in trains.iteritems(): c = collapsed_spike_trains(t) kernel_size[u] = optimal_gauss_kernel_size( c.time_slice(start, stop), optimize_steps, progress) progress.set_ticks(len(trains)) progress.set_status('Creating 
spike density plot') # Calculate KDEs kde = {} for u, t in trains.iteritems(): # Collapse spike trains collapsed = collapsed_spike_trains(t).rescale(units) scaled_kernel = sigproc.as_kernel_of_size(kernel, kernel_size[u]) # Create density estimation using convolution sliced = collapsed.time_slice(start, stop) sampling_rate = 1024.0 / (sliced.t_stop - sliced.t_start) kde[u] = sigproc.st_convolve( sliced, scaled_kernel, sampling_rate, kernel_discretization_params={ 'num_bins': 2048, 'ensure_unit_area': True})[0] / len(trains[u]) kde[u].units = pq.Hz return kde, kernel_size, eval_points def collapsed_spike_trains(trains): """ Return a superposition of a list of spike trains. :param iterable trains: A list of :class:`neo.core.SpikeTrain` objects :returns: A spike train object containing all spikes of the given spike trains. :rtype: :class:`neo.core.SpikeTrain` """ if not trains: return neo.SpikeTrain([] * pq.s, 0 * pq.s) start = min((t.t_start for t in trains)) stop = max((t.t_stop for t in trains)) collapsed = [] for t in trains: collapsed.extend(sp.asarray(t.rescale(stop.units))) return neo.SpikeTrain(collapsed * stop.units, t_stop=stop, t_start=start) def optimal_gauss_kernel_size(train, optimize_steps, progress=None): """ Return the optimal kernel size for a spike density estimation of a spike train for a gaussian kernel. This function takes a single spike train, which can be a superposition of multiple spike trains (created with :func:`collapsed_spike_trains`) that should be included in a spike density estimation. Implements the algorithm from (Shimazaki, Shinomoto. Journal of Computational Neuroscience. 2010). :param train: The spike train for which the kernel size should be optimized. :type train: :class:`neo.core.SpikeTrain` :param optimize_steps: Array of kernel sizes to try (the best of these sizes will be returned). :type optimize_steps: Quantity 1D :param progress: Set this parameter to report progress. Will be advanced by len(`optimize_steps`) steps. 
:type progress: :class:`.progress_indicator.ProgressIndicator` :returns: Best of the given kernel sizes :rtype: Quantity scalar """ if not progress: progress = ProgressIndicator() x = train.rescale(optimize_steps.units) N = len(train) C = {} sampling_rate = 1024.0 / (x.t_stop - x.t_start) dt = float(1.0 / sampling_rate) y_hist = tools.bin_spike_trains({0: [x]}, sampling_rate)[0][0][0] y_hist = sp.asfarray(y_hist) / N / dt for step in optimize_steps: s = float(step) yh = sigproc.smooth( y_hist, sigproc.GaussianKernel(2 * step), sampling_rate, num_bins=2048, ensure_unit_area=True) * optimize_steps.units # Equation from Matlab code, 7/2012 c = (sp.sum(yh ** 2) * dt - 2 * sp.sum(yh * y_hist) * dt + 2 * 1 / sp.sqrt(2 * sp.pi) / s / N) C[s] = c * N * N progress.step() # Return kernel size with smallest cost return min(C, key=C.get) * optimize_steps.units spykeutils-0.4.3/spykeutils/tools.py0000644000175000017500000002756212664623646016041 0ustar robrobimport neo try: import neo.description HAS_DESCRIPTION = True except ImportError: HAS_DESCRIPTION = False import quantities as pq import scipy as sp import _scipy_quantities as spq def apply_to_dict(fn, dictionary, *args): """ Applies a function to all spike trains in a dictionary of spike train sequences. :param function fn: Function to apply. Should take a :class:`neo.core.SpikeTrain` as first argument. :param dict dictionary: Dictionary of sequences of :class:`neo.core.SpikeTrain` objects to apply the function to. :param args: Additional arguments which will be passed to ``fn``. :returns: A new dictionary with the same keys as ``dictionary``. :rtype: dict """ applied = {} for k in dictionary: applied[k] = [fn(st, *args) for st in dictionary[k]] return applied def bin_spike_trains(trains, sampling_rate, t_start=None, t_stop=None): """ Creates binned representations of spike trains. :param dict trains: A dictionary of sequences of :class:`neo.core.SpikeTrain` objects. 
    :param sampling_rate: The sampling rate which will be used to bin
        the spike trains as inverse time scalar.
    :type sampling_rate: Quantity scalar
    :param t_start: The desired time for the start of the first bin as
        time scalar. It will be the minimum start time of all spike
        trains if ``None`` is passed.
    :type t_start: Quantity scalar
    :param t_stop: The desired time for the end of the last bin as time
        scalar. It will be the maximum stop time of all spike trains if
        ``None`` is passed.
    :type t_stop: Quantity scalar
    :returns: A dictionary (with the same indices as ``trains``) of lists
        of spike train counts and the bin borders.
    :rtype: dict, Quantity 1D with time units
    """
    # Fill in missing interval borders from the union of all spike train
    # intervals.
    if t_start is None or t_stop is None:
        max_start, max_stop = maximum_spike_train_interval(trains)
        if t_start is None:
            t_start = max_start
        if t_stop is None:
            t_stop = max_stop

    t_start = t_start.rescale(t_stop.units)

    # Number of bins implied by the sampling rate over the interval; the
    # bin borders are evenly spaced between t_start and t_stop.
    duration = t_stop - t_start
    num_bins = (sampling_rate * duration).simplified
    bins = sp.arange(num_bins + 1) * (duration / num_bins) + t_start
    return apply_to_dict(_bin_single_spike_train, trains, bins), bins


def _bin_single_spike_train(train, bins):
    """ Return a binned representation of SpikeTrain object.

    :param train: A spike train to bin.
    :type train: :class:`neo.core.SpikeTrain`
    :param bins: The bin edges, including the rightmost edge, with time
        units.
    :type bins: Quantity 1D
    :returns: The binned spike train.
    :rtype: 1-D array
    """
    # Rescale to the units of the bin edges so the histogram compares
    # compatible magnitudes.
    return sp.histogram(train.rescale(bins.units), bins)[0]


def concatenate_spike_trains(trains):
    """ Concatenates spike trains.

    :param sequence trains: :class:`neo.core.SpikeTrain` objects to
        concatenate.
    :returns: A spike train consisting of the concatenated spike trains. The
        spikes will be in the order of the given spike trains and ``t_start``
        and ``t_stop`` will be set to the minimum and maximum value.
    :rtype: :class:`neo.core.SpikeTrain`
    """
    # The interval containing all given trains becomes the interval of
    # the concatenated train.
    t_start, t_stop = maximum_spike_train_interval({0: trains})

    return neo.SpikeTrain(
        spq.concatenate([train.view(type=pq.Quantity) for train in trains]),
        t_start=t_start, t_stop=t_stop)


def minimum_spike_train_interval(
        trains, t_start=-sp.inf * pq.s, t_stop=sp.inf * pq.s):
    """ Computes the maximum starting time and minimum end time that all
    given spike trains share. This yields the shortest interval shared by
    all spike trains.

    :param dict trains: A dictionary of sequences of
        :class:`neo.core.SpikeTrain` objects.
    :param t_start: Minimal starting time to return.
    :type t_start: Quantity scalar
    :param t_stop: Maximum end time to return. If ``None``, infinity is
        used.
    :type t_stop: Quantity scalar
    :returns: Maximum shared t_start time and minimum shared t_stop time
        as time scalars.
    :rtype: Quantity scalar, Quantity scalar
    """
    if t_stop is None:
        t_stop = sp.inf * pq.s

    # Load data and find shortest spike train
    for st in trains.itervalues():
        if len(st) > 0:
            # Minimum length of spike of all spike trains for this unit
            t_start = max(t_start, max((t.t_start for t in st)))
            t_stop = min(t_stop, min((t.t_stop for t in st)))
    # t_stop only stays infinite when no train contained spikes (and the
    # caller did not pass a finite bound); collapse to an empty interval
    # rather than returning an infinite end time.
    if t_stop == sp.inf * pq.s:
        t_stop = t_start

    return t_start, t_stop


def maximum_spike_train_interval(
        trains, t_start=sp.inf * pq.s, t_stop=-sp.inf * pq.s):
    """ Computes the minimum starting time and maximum end time of all
    given spike trains. This yields an interval containing the spikes of
    all spike trains.

    :param dict trains: A dictionary of sequences of
        :class:`neo.core.SpikeTrain` objects.
    :param t_start: Maximum starting time to return.
    :type t_start: Quantity scalar
    :param t_stop: Minimum end time to return. If ``None``, infinity is
        used.
    :type t_stop: Quantity scalar
    :returns: Minimum t_start time and maximum t_stop time as time
        scalars.
    :rtype: Quantity scalar, Quantity scalar
    """
    if t_stop is None:
        t_stop = sp.inf * pq.s

    for st in trains.itervalues():
        if len(st) > 0:
            t_start = min(t_start, min((t.t_start for t in st)))
            t_stop = max(t_stop, max((t.t_stop for t in st)))

    return t_start, t_stop


def _handle_orphans(obj, remove):
    """ Removes half-orphaned Spikes and SpikeTrains that occur when
    removing an object upwards in the hierarchy.

    :param obj: The Segment, Unit or RecordingChannelGroup being removed
        from the hierarchy.
    :param bool remove: If ``True``, affected Spike/SpikeTrain objects
        are also removed from the lists of their remaining parent. If
        ``False``, only their link to ``obj`` is cleared.
    """
    if isinstance(obj, neo.Segment):
        for s in obj.spikes:
            if s.unit:
                if not remove:
                    s.segment = None
                else:
                    try:
                        s.unit.spikes.remove(s)
                    except ValueError:
                        pass  # Spike was not registered with its unit
        for st in obj.spiketrains:
            if st.unit:
                if not remove:
                    st.segment = None
                else:
                    try:
                        st.unit.spiketrains.remove(st)
                    except ValueError:
                        pass
    elif isinstance(obj, neo.Unit):
        for s in obj.spikes:
            if s.segment:
                if not remove:
                    s.unit = None
                else:
                    try:
                        s.segment.spikes.remove(s)
                    except ValueError:
                        pass
        for st in obj.spiketrains:
            if st.segment:
                if not remove:
                    st.unit = None
                else:
                    try:
                        st.segment.spiketrains.remove(st)
                    except ValueError:
                        pass
    elif isinstance(obj, neo.RecordingChannelGroup):
        # Removing a channel group orphans the spikes of all its units.
        for u in obj.units:
            _handle_orphans(u, remove)


def remove_from_hierarchy(obj, remove_half_orphans=True):
    """ Removes a Neo object from the hierarchy it is embedded in. Mostly
    downward links are removed (except for possible links in
    :class:`neo.core.Spike` or :class:`neo.core.SpikeTrain` objects).
    For example, when ``obj`` is a :class:`neo.core.Segment`, the link
    from its parent :class:`neo.core.Block` will be severed. Also, all
    links to the segment from its spikes and spike trains will be
    severed.

    :param obj: The object to be removed.
    :type obj: Neo object
    :param bool remove_half_orphans: When True, :class:`neo.core.Spike`
        and :class:`neo.core.SpikeTrain` belonging to a
        :class:`neo.core.Segment` or :class:`neo.core.Unit` removed by
        this function will be removed from the hierarchy as well, even if
        they are still linked from a :class:`neo.core.Unit` or
        :class:`neo.core.Segment`, respectively.
In this case, their links to the hierarchy defined by ``obj`` will be kept intact. """ classname = type(obj).__name__ # Parent for arbitrary object if HAS_DESCRIPTION: if classname in neo.description.many_to_one_relationship: for n in neo.description.many_to_one_relationship[classname]: p = getattr(obj, n.lower()) if p is None: continue l = getattr(p, classname.lower() + 's', ()) try: l.remove(obj) except ValueError: pass else: for n in obj._single_parent_objects: p = getattr(obj, n.lower()) if p is None: continue l = getattr(p, classname.lower() + 's', ()) try: l.remove(obj) except ValueError: pass # Many-to-many relationships if isinstance(obj, neo.RecordingChannel): for rcg in obj.recordingchannelgroups: try: idx = rcg.recordingchannels.index(obj) if rcg.channel_indexes.shape[0] == len(rcg.recordingchannels): rcg.channel_indexes = sp.delete(rcg.channel_indexes, idx) if rcg.channel_names.shape[0] == len(rcg.recordingchannels): rcg.channel_names = sp.delete(rcg.channel_names, idx) rcg.recordingchannels.remove(obj) except ValueError: pass if isinstance(obj, neo.RecordingChannelGroup): for rc in obj.recordingchannels: try: rc.recordingchannelgroups.remove(obj) except ValueError: pass _handle_orphans(obj, remove_half_orphans) def extract_spikes(train, signals, length, align_time): """ Extract spikes with waveforms from analog signals using a spike train. Spikes that are too close to the beginning or end of the shortest signal to be fully extracted are ignored. :type train: :class:`neo.core.SpikeTrain` :param train: The spike times. :param sequence signals: A sequence of :class:`neo.core.AnalogSignal` objects from which the spikes are extracted. The waveforms of the returned spikes are extracted from these signals in the same order they are given. :type length: Quantity scalar :param length: The length of the waveform to extract as time scalar. :type align_time: Quantity scalar :param align_time: The alignment time of the spike times as time scalar. 
This is the time delta from the start of the extracted waveform to the exact time of the spike. :returns: A list of :class:`neo.core.Spike` objects, one for each time point in ``train``. All returned spikes include their ``waveform`` property. :rtype: list """ if not signals: raise ValueError('No signals to extract spikes from') ref = signals[0] for s in signals[1:]: if ref.sampling_rate != s.sampling_rate: raise ValueError( 'All signals for spike extraction need the same sampling rate') wave_unit = signals[0].units srate = signals[0].sampling_rate end = min(s.shape[0] for s in signals) aligned_train = train - align_time cut_samples = int((length * srate).simplified) st = sp.asarray((aligned_train * srate).simplified) # Find extraction epochs st_ok = (st >= 0) * (st < end - cut_samples) epochs = sp.vstack((st[st_ok], st[st_ok] + cut_samples)).T.astype(sp.int64) nspikes = epochs.shape[0] if not nspikes: return [] # Create data data = sp.vstack([sp.asarray(s.rescale(wave_unit)) for s in signals]) nc = len(signals) spikes = [] for s in xrange(nspikes): waveform = sp.zeros((cut_samples, nc)) for c in xrange(nc): waveform[:, c] = \ data[c, epochs[s, 0]:epochs[s, 1]] spikes.append(neo.Spike(train[st_ok][s], waveform=waveform * wave_unit, sampling_rate=srate)) return spikes spykeutils-0.4.3/spykeutils/plot/0000755000175000017500000000000012664623646015271 5ustar robrobspykeutils-0.4.3/spykeutils/plot/sde.py0000644000175000017500000001200612664623646016415 0ustar robrobimport scipy as sp from guiqwt.builder import make from guiqwt.baseplot import BasePlot from guiqwt.plot import BaseCurveWidget import quantities as pq from .. import SpykeException from .. import rate_estimation from .. 
import signal_processing from ..progress_indicator import ProgressIndicator from dialog import PlotDialog import helper @helper.needs_qt def sde(trains, events=None, start=0 * pq.ms, stop=None, kernel_size=100 * pq.ms, optimize_steps=0, minimum_kernel=10 * pq.ms, maximum_kernel=500 * pq.ms, kernel=None, time_unit=pq.ms, progress=None): """ Create a spike density estimation plot. The spike density estimations give an estimate of the instantaneous rate. Optionally finds optimal kernel size for given data. :param dict trains: A dictionary of :class:`neo.core.SpikeTrain` lists. :param dict events: A dictionary (with the same indices as ``trains``) of Event objects or lists of Event objects. In case of lists, the first event in the list will be used for alignment. The events will be at time 0 on the plot. If None, spike trains are used unmodified. :param start: The desired time for the start of the first bin. It will be recalculated if there are spike trains which start later than this time. This parameter can be negative (which could be useful when aligning on events). :type start: Quantity scalar :param stop: The desired time for the end of the last bin. It will be recalculated if there are spike trains which end earlier than this time. :type stop: Quantity scalar :param kernel_size: A uniform kernel size for all spike trains. Only used if optimization of kernel sizes is not used (i.e. ``optimize_steps`` is 0). :type kernel_size: Quantity scalar :param int optimize_steps: The number of different kernel sizes tried between ``minimum_kernel`` and ``maximum_kernel``. If 0, ``kernel_size`` will be used. :param minimum_kernel: The minimum kernel size to try in optimization. :type minimum_kernel: Quantity scalar :param maximum_kernel: The maximum kernel size to try in optimization. :type maximum_kernel: Quantity scalar :param kernel: The kernel function or instance to use, should accept two parameters: A ndarray of distances and a kernel size. 
The total area under the kernel function should be 1. Automatic optimization assumes a Gaussian kernel and will likely not produce optimal results for different kernels. Default: Gaussian kernel :type kernel: func or :class:`spykeutils.signal_processing.Kernel` :param Quantity time_unit: Unit of X-Axis. :param progress: Set this parameter to report progress. :type progress: :class:`spykeutils.progress_indicator.ProgressIndicator` """ if not progress: progress = ProgressIndicator() start.units = time_unit if stop: stop.units = time_unit kernel_size.units = time_unit minimum_kernel.units = time_unit maximum_kernel.units = time_unit if kernel is None: kernel = signal_processing.GaussianKernel(100 * pq.ms) # Align spike trains for u in trains: if events: trains[u] = rate_estimation.aligned_spike_trains( trains[u], events) # Calculate spike density estimation if optimize_steps: steps = sp.logspace(sp.log10(minimum_kernel), sp.log10(maximum_kernel), optimize_steps) * time_unit sde, kernel_size, eval_points = \ rate_estimation.spike_density_estimation( trains, start, stop, optimize_steps=steps, kernel=kernel, progress=progress) else: sde, kernel_size, eval_points = \ rate_estimation.spike_density_estimation( trains, start, stop, kernel_size=kernel_size, kernel=kernel, progress=progress) progress.done() if not sde: raise SpykeException('No spike trains for SDE!') # Plot win_title = 'Kernel Density Estimation' win = PlotDialog(toolbar=True, wintitle=win_title) pW = BaseCurveWidget(win) plot = pW.plot plot.set_antialiasing(True) for u in trains: if u and u.name: name = u.name else: name = 'Unknown' curve = make.curve( eval_points, sde[u], title='%s, Kernel width %.2f %s' % (name, kernel_size[u], time_unit.dimensionality.string), color=helper.get_object_color(u)) plot.add_item(curve) plot.set_axis_title(BasePlot.X_BOTTOM, 'Time') plot.set_axis_unit(BasePlot.X_BOTTOM, eval_points.dimensionality.string) plot.set_axis_title(BasePlot.Y_LEFT, 'Rate') 
plot.set_axis_unit(BasePlot.Y_LEFT, 'Hz') l = make.legend() plot.add_item(l) win.add_plot_widget(pW, 0) win.add_custom_curve_tools() win.add_legend_option([l], True) win.show() return winspykeutils-0.4.3/spykeutils/plot/correlogram.py0000644000175000017500000001037012664623646020160 0ustar robrobfrom guiqwt.builder import make from guiqwt.baseplot import BasePlot from guiqwt.plot import BaseCurveWidget import quantities as pq import scipy as sp from .. import SpykeException from ..progress_indicator import ProgressIndicator from ..correlations import correlogram from dialog import PlotDialog import helper @helper.needs_qt def cross_correlogram(trains, bin_size, max_lag=500 * pq.ms, border_correction=True, per_second=True, square=False, time_unit=pq.ms, progress=None): """ Create (cross-)correlograms from a dictionary of spike train lists for different units. :param dict trains: Dictionary of :class:`neo.core.SpikeTrain` lists. :param bin_size: Bin size (time). :type bin_size: Quantity scalar :param max_lag: Maximum time lag for which spikes are considered (end time of calculated correlogram). :type max_lag: Quantity scalar :param bool border_correction: Apply correction for less data at higher timelags. :param bool per_second: If ``True``, the y-axis is count per second, otherwise it is count per spike train. :param bool square: If ``True``, the plot will include all cross-correlograms, even if they are just mirrored versions of each other. The autocorrelograms are displayed as the diagonal of a square plot matrix. If ``False``, mirrored plots are omitted. :param Quantity time_unit: Unit of X-Axis. :param progress: Set this parameter to report progress. 
:type progress: :class:`spykeutils.progress_indicator.ProgressIndicator` """ if not trains: raise SpykeException('No spike trains for correlogram') if not progress: progress = ProgressIndicator() win_title = 'Correlogram | Bin size ' + str(bin_size) progress.begin('Creating correlogram') progress.set_status('Calculating...') win = PlotDialog(toolbar=True, wintitle=win_title, min_plot_width=150, min_plot_height=100) correlograms, bins = correlogram( trains, bin_size, max_lag, border_correction, per_second, time_unit, progress) x = bins[:-1] + bin_size / 2 crlgs = [] indices = correlograms.keys() for i1 in xrange(len(indices)): start_i = 0 if not square: start_i = i1 for i2 in xrange(start_i, len(indices)): crlgs.append( (correlograms[indices[i1]][indices[i2]], indices[i1], indices[i2])) columns = int(sp.sqrt(len(crlgs))) legends = [] for i, c in enumerate(crlgs): legend_items = [] pW = BaseCurveWidget(win) plot = pW.plot plot.set_antialiasing(True) plot.add_item(make.curve(x, c[0])) # Create legend color = helper.get_object_color(c[1]) color_curve = make.curve( [], [], c[1].name, color, 'NoPen', linewidth=1, marker='Rect', markerfacecolor=color, markeredgecolor=color) legend_items.append(color_curve) plot.add_item(color_curve) if c[1] != c[2]: color = helper.get_object_color(c[2]) color_curve = make.curve( [], [], c[2].name, color, 'NoPen', linewidth=1, marker='Rect', markerfacecolor=color, markeredgecolor=color) legend_items.append(color_curve) plot.add_item(color_curve) legends.append(make.legend(restrict_items=legend_items)) plot.add_item(legends[-1]) if i >= len(crlgs) - columns: plot.set_axis_title(BasePlot.X_BOTTOM, 'Time') plot.set_axis_unit(BasePlot.X_BOTTOM, time_unit.dimensionality.string) if i % columns == 0: plot.set_axis_title(BasePlot.Y_LEFT, 'Correlation') if per_second: plot.set_axis_unit(BasePlot.Y_LEFT, 'count/second') else: plot.set_axis_unit(BasePlot.Y_LEFT, 'count/segment') win.add_plot_widget(pW, i, column=i % columns) 
win.add_custom_curve_tools() progress.done() win.add_legend_option(legends, True) win.show() if len(crlgs) > 1: win.add_x_synchronization_option(True, range(len(crlgs))) win.add_y_synchronization_option(False, range(len(crlgs))) return winspykeutils-0.4.3/spykeutils/plot/guiqwt_tools.py0000644000175000017500000000450712664623646020411 0ustar robrob""" Additional guiqwt tools to facilitate plot navigation. """ from PyQt4 import QtGui from PyQt4.QtCore import Qt from PyQt4.QtGui import QMessageBox try: from PyQt4 import QtCore _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: _fromUtf8 = lambda s: s from guiqwt.tools import CommandTool, DefaultToolbarID, InteractiveTool from guidata.qthelpers import get_std_icon from guiqwt.config import _ from guiqwt.events import (setup_standard_tool_filter, PanHandler) import icons_rc class HomeTool(CommandTool): """ A command to show all elements in the plot (same as pressing the middle mouse button). """ def __init__(self, manager, toolbar_id=DefaultToolbarID): icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(_fromUtf8(':/Plottools/Home')), QtGui.QIcon.Normal, QtGui.QIcon.Off) super(HomeTool, self).__init__(manager, 'Home', icon, toolbar_id=toolbar_id) def activate_command(self, plot, checked): """ Activate that command! """ plot.do_autoscale() class PanTool(InteractiveTool): """ Allows panning with the left mouse button. """ TITLE = _("Pan") ICON = "move.png" CURSOR = Qt.OpenHandCursor def setup_filter(self, baseplot): filter = baseplot.filter start_state = filter.new_state() PanHandler(filter, Qt.LeftButton, start_state=start_state) return setup_standard_tool_filter(filter, start_state) class HelpTool(CommandTool): """ A help tool that includes a message what a single middle click does, otherwise identical to the guiqwt tool with the same name. 
""" def __init__(self, manager, toolbar_id=DefaultToolbarID): super(HelpTool,self).__init__(manager, "Help", get_std_icon("DialogHelpButton", 16), toolbar_id=toolbar_id) def activate_command(self, plot, checked): """Activate tool""" QMessageBox.information(plot, "Help", """Keyboard/mouse shortcuts: - single left-click: item (curve, image, ...) selection - single right-click: context-menu relative to selected item - single middle click: home - shift: on-active-curve (or image) cursor - alt: free cursor - left-click + mouse move: move item (when available) - middle-click + mouse move: pan - right-click + mouse move: zoom""")spykeutils-0.4.3/spykeutils/plot/peri_stimulus_histogram.py0000644000175000017500000001332012664623646022623 0ustar robrobimport scipy as sp import quantities as pq from guiqwt.builder import make from guiqwt.baseplot import BasePlot from guiqwt.plot import BaseCurveWidget from .. import rate_estimation from ..progress_indicator import ProgressIndicator from .. import SpykeException from dialog import PlotDialog import helper @helper.needs_qt def psth(trains, events=None, start=0 * pq.ms, stop=None, bin_size=100 * pq.ms, rate_correction=True, bar_plot=False, time_unit=pq.ms, progress=None): """ Create a peri stimulus time histogram. The peri stimulus time histogram gives an estimate of the instantaneous rate. :param dict trains: A dictionary of :class:`neo.core.SpikeTrain` lists. :param dict events: A dictionary of Event objects, indexed by segment. The events will be at time 0 on the plot. If None, spike trains are used unmodified. :param start: The desired time for the start of the first bin. It will be recalculated if there are spike trains which start later than this time. This parameter can be negative (which could be useful when aligning on events). :type start: Quantity scalar :param stop: The desired time for the end of the last bin. It will be recalculated if there are spike trains which end earlier than this time. 
:type stop: Quantity scalar :param bin_size: The bin size for the histogram. :type bin_size: Quantity scalar :param bool rate_correction: Determines if a rates (``True``) or counts (``False``) are shown. :param bool bar_plot: Determines if a bar plot (``True``) or a line plot (``False``) will be created. In case of a bar plot, one plot for each index in ``trains`` will be created. :param Quantity time_unit: Unit of X-Axis. :param progress: Set this parameter to report progress. :type progress: :class:`spykeutils.progress_indicator.ProgressIndicator` """ if not trains: raise SpykeException('No spike trains for PSTH!') if not progress: progress = ProgressIndicator() # Align spike trains for u in trains: if events: trains[u] = rate_estimation.aligned_spike_trains( trains[u], events) rates, bins = rate_estimation.psth( trains, bin_size, start=start, stop=stop, rate_correction=rate_correction) bins = bins.rescale(time_unit) if not psth: raise SpykeException('No spike trains for PSTH!') win_title = 'PSTH | Bin size %.2f %s' % (bin_size, time_unit.dimensionality.string) win = PlotDialog(toolbar=True, wintitle=win_title, min_plot_width=150, min_plot_height=100) legends = [] if bar_plot: ind = 0 columns = int(sp.sqrt(len(rates))) for i, r in rates.iteritems(): if i and hasattr(i, 'name') and i.name: name = i.name else: name = 'Unknown' pW = BaseCurveWidget(win) plot = pW.plot show_rates = list(r) show_rates.insert(0, show_rates[0]) curve = make.curve( bins, show_rates, name, color='k', curvestyle="Steps", shade=1.0) plot.add_item(curve) # Create legend color = helper.get_object_color(i) color_curve = make.curve( [], [], name, color, 'NoPen', linewidth=1, marker='Rect', markerfacecolor=color, markeredgecolor=color) plot.add_item(color_curve) legends.append(make.legend(restrict_items=[color_curve])) plot.add_item(legends[-1]) # Prepare plot plot.set_antialiasing(False) scale = plot.axisScaleDiv(BasePlot.Y_LEFT) plot.setAxisScale(BasePlot.Y_LEFT, 0, scale.upperBound()) if ind 
% columns == 0: if not rate_correction: plot.set_axis_title(BasePlot.Y_LEFT, 'Spike Count') else: plot.set_axis_title(BasePlot.Y_LEFT, 'Rate') plot.set_axis_unit(BasePlot.Y_LEFT, 'Hz') if ind >= len(trains) - columns: plot.set_axis_title(BasePlot.X_BOTTOM, 'Time') plot.set_axis_unit( BasePlot.X_BOTTOM, time_unit.dimensionality.string) win.add_plot_widget(pW, ind, column=ind % columns) ind += 1 else: bins = 0.5 * sp.diff(bins) + bins[:-1] pW = BaseCurveWidget(win) plot = pW.plot legend_items = [] for i, r in rates.iteritems(): if i and hasattr(i, 'name') and i.name: name = i.name else: name = 'Unknown' curve = make.curve( bins, r, name, color=helper.get_object_color(i)) legend_items.append(curve) plot.add_item(curve) win.add_plot_widget(pW, 0) legends.append(make.legend(restrict_items=legend_items)) plot.add_item(legends[-1]) if not rate_correction: plot.set_axis_title(BasePlot.Y_LEFT, 'Spike Count') else: plot.set_axis_title(BasePlot.Y_LEFT, 'Rate') plot.set_axis_unit(BasePlot.Y_LEFT, 'Hz') plot.set_axis_title(BasePlot.X_BOTTOM, 'Time') plot.set_axis_unit(BasePlot.X_BOTTOM, time_unit.dimensionality.string) plot.set_antialiasing(True) win.add_custom_curve_tools() win.add_legend_option(legends, True) progress.done() win.show() if bar_plot and len(rates) > 1: win.add_x_synchronization_option(True, range(len(rates))) win.add_y_synchronization_option(False, range(len(rates))) return winspykeutils-0.4.3/spykeutils/plot/helper.py0000644000175000017500000002035012664623646017122 0ustar robrob""" This module contains some utility functions that are usefult in plot creation. """ import functools import scipy as sp from PyQt4.QtGui import QApplication from PyQt4 import QtCore from PyQt4.QtGui import QProgressDialog from guiqwt.builder import make from ..progress_indicator import (ProgressIndicator, CancelException) class _MarkerName: """ Helper class to create marker name functions for different strings. 
""" def __init__(self, name): self.name = name #noinspection PyUnusedLocal def get_name(self, x, y): return self.name def _needs_qt(function): @functools.wraps(function) def inner(*args, **kwargs): if not QApplication.instance(): _needs_qt.app = QApplication([]) ret = function(*args, **kwargs) if _needs_qt.app: _needs_qt.app.exec_() return ret return inner _needs_qt.app = None # Make need_qt decorator preserve signature if decorator package is available try: from decorator import FunctionMaker def decorator_apply(dec, func): return FunctionMaker.create( func, 'return decorated(%(signature)s)', dict(decorated=dec(func)), __wrapped__=func) def needs_qt(func): """ Decorator for functions making sure that an initialized PyQt exists. """ return decorator_apply(_needs_qt, func) except ImportError: needs_qt = _needs_qt # Optimum contrast color palette (without white and black), see # http://web.media.mit.edu/~wad/color/palette.html __default_colors = [ '#575757', # Dark Gray '#ad2323', # Red '#2a4bd7', # Blue '#296914', # Green '#614a19', # Brown (lower R to better distinguish from purple) '#8126c0', # Purple '#a0a0a0', # Light Gray '#81c57a', # Light Green '#9dafff', # Light Blue '#29d0d0', # Cyan '#ff9233', # Orange '#ffee33', # Yellow '#b6ab88', # Tan (darkened for better visibility on white background) '#ff89d1'] # Pink (darkened for better visibility on white background) __colors = __default_colors def get_color(entity_id): """ Return a color for an int. """ return __colors[entity_id % len(__colors)] def get_object_color(unit): """ Return a color for a Neo object, based on the 'unique_id' annotation. If the annotation is not present, return a color based on the hash of the object. """ try: if 'unique_id' in unit.annotations: return get_color(unit.annotations['unique_id']) except Exception: return get_color(hash(unit)) return get_color(hash(unit)) def set_color_scheme(colors): """ Set the color scheme used in plots. 
:param sequence colors: A list of strings with HTML-style color codes (e.g. ``'#ffffff'`` for white). If this is ``None`` or empty, the default color scheme will be set. """ global __colors global __default_colors if not colors: __colors = __default_colors else: __colors = colors def add_events(plot, events, units=None): """ Add Event markers to a guiqwt plot. :param plot: The plot object. :type plot: :class:`guiqwt.baseplot.BasePlot` :param sequence events: The events (neo :class:`neo.Event` objects). :param Quantity units: The x-scale of the plot. If this is ``None``, the time unit of the events will be use. """ for m in events: nameObject = _MarkerName(m.label) if units: time = m.time.rescale(units) else: time = m.time plot.add_item( make.marker( (time, 0), nameObject.get_name, movable=False, markerstyle='|', color='k', linestyle=':', linewidth=1)) def add_spikes(plot, train, color='k', spike_width=1, spike_height=20000, y_offset=0, name='', units=None): """ Add all spikes from a spike train to a guiqwt plot as vertical lines. :param plot: The plot object. :type plot: :class:`guiqwt.baseplot.BasePlot` :param train: A spike train with the spike times to show. :type train: :class:`neo.core.SpikeTrain` :param str color: The color for the spikes. :param int spike_width: The width of the shown spikes in pixels. :param int spike_height: The height of the shown spikes in pixels. :param float y_offset: An offset for the drawing position on the y-axis. :param str name: The name of the spike train. :param Quantity units: The x-scale of the plot. If this is ``None``, the time unit of the events will be use. 
:returns: The plot item added for the spike train """ if units: train = train.rescale(units) spikes = make.curve( train, sp.zeros(len(train)) + y_offset, name, 'k', 'NoPen', linewidth=0, marker='Rect', markerfacecolor=color, markeredgecolor=color) s = spikes.symbol() s.setSize(spike_width - 1, spike_height) spikes.setSymbol(s) plot.add_item(spikes) return spikes def add_epochs(plot, epochs, units=None): """ Add Epoch markers to a guiqwt plot. :param plot: The plot object. :type plot: :class:`guiqwt.baseplot.BasePlot` :param sequence epochs: The epochs (neo :class:`neo.Epoch` objects). :param units: The x-scale of the plot. If this is ``None``, the time unit of the events will be use. """ for e in epochs: if units: start = e.time.rescale(units) end = (e.time + e.duration).rescale(units) else: start = e.time end = e.time + e.duration time = (start + end) / 2.0 o = make.range(start, end) o.setTitle(e.label) o.set_readonly(True) o.set_movable(False) o.set_resizable(False) o.set_selectable(False) o.set_rotatable(False) plot.add_item(o) nameObject = _MarkerName(e.label) plot.add_item(make.marker( (time, 0), nameObject.get_name, movable=False, markerstyle='|', color='k', linestyle='NoPen', linewidth=1)) def make_window_legend(win, objects, show_option=None): """ Create a legend in a PlotDialog for a given sequence of neo objects. :param win: The window where the legend will be added. :type win: :class:`spykeutils.plot.dialogs.PlotDialog` :param sequence objects: A list of neo objects which will be included in the legend. :param bool show_option: Determines whether a toggle for the legend will be shown (if the parameter is not ``None``) and if the legend is visible initially (``True``/``False``). 
""" if not objects: return legend = [] for u in objects: if u is not None: name = u.name else: name = 'No identifier' legend.append((get_object_color(u), name)) win.add_color_legend(legend, show_option) class ProgressIndicatorDialog(ProgressIndicator, QProgressDialog): """ This class implements :class:`spykeutils.progress_indicator.ProgressIndicator` as a ``QProgressDialog``. It can be used to indicate progress in a graphical user interface. Qt needs to be initialized in order to use it. """ def __init__(self, parent, title='Processing...'): QProgressDialog.__init__(self, parent) self.setWindowTitle(title) self.setMinimumWidth(500) self.setAutoReset(False) def set_ticks(self, ticks): self.setMaximum(ticks) if self.isVisible(): self.setValue(0) def begin(self, title='Processing...'): self.setWindowTitle(title) self.setLabelText('') self.setValue(0) if not self.isVisible(): self.reset() self.open() QtCore.QCoreApplication.instance().processEvents() def step(self, num_steps=1): if not self.isVisible(): return self.setValue(self.value() + num_steps) QtCore.QCoreApplication.instance().processEvents() if self.wasCanceled(): self.done() raise CancelException() super(ProgressIndicatorDialog, self).step() def set_status(self, status): self.setLabelText(status) QtCore.QCoreApplication.instance().processEvents() def done(self): self.reset() spykeutils-0.4.3/spykeutils/plot/analog_signals.py0000644000175000017500000002005612664623646020627 0ustar robrobfrom __future__ import division import scipy as sp import quantities as pq from guiqwt.builder import make from guiqwt.baseplot import BasePlot from guiqwt.plot import BaseCurveWidget from ..progress_indicator import ProgressIndicator from .. import conversions from .. 
import SpykeException from dialog import PlotDialog import helper @helper.needs_qt def signals(signals, events=None, epochs=None, spike_trains=None, spikes=None, show_waveforms=True, use_subplots=True, subplot_names=True, time_unit=pq.s, y_unit=None, progress=None): """ Create a plot from a list of analog signals. :param list signals: The list of :class:`neo.core.AnalogSignal` objects to plot. :param sequence events: A list of Event objects to be included in the plot. :param sequence epochs: A list of Epoch objects to be included in the plot. :param list spike_trains: A list of :class:`neo.core.SpikeTrain` objects to be included in the plot. The ``unit`` property (if it exists) is used for color and legend entries. :param list spikes: A list :class:`neo.core.Spike` objects to be included in the plot. The ``unit`` property (if it exists) is used for color and legend entries. :param bool show_waveforms: Determines if spikes from :class:`neo.core.Spike` and :class:`neo.core.SpikeTrain` objects are shown as waveforms (if available) or vertical lines. :param bool use_subplots: Determines if a separate subplot for is created each signal. :param bool subplot_names: Only valid if ``use_subplots`` is True. Determines if signal (or channel) names are shown for subplots. :param Quantity time_unit: The unit of the x axis. :param progress: Set this parameter to report progress. 
:type progress: :class:`spykeutils.progress_indicator.ProgressIndicator` """ if not signals: raise SpykeException( 'Cannot create signal plot: No signal data provided!') if not progress: progress = ProgressIndicator() # Plot title win_title = 'Analog Signal' if len(set((s.recordingchannel for s in signals))) == 1: if signals[0].recordingchannel and signals[0].recordingchannel.name: win_title += ' | Recording Channel: %s' %\ signals[0].recordingchannel.name if len(set((s.segment for s in signals))) == 1: if signals[0].segment and signals[0].segment.name: win_title += ' | Segment: %s' % signals[0].segment.name win = PlotDialog(toolbar=True, wintitle=win_title) if events is None: events = [] if epochs is None: epochs = [] if spike_trains is None: spike_trains = [] if spikes is None: spikes = [] if show_waveforms: for st in spike_trains: if st.waveforms is not None: spikes.extend(conversions.spike_train_to_spikes(st)) spike_trains = [] else: unit_spikes = {} for s in spikes: unit_spikes.setdefault(s.unit, []).append(s) for sps in unit_spikes.itervalues(): spike_trains.append(conversions.spikes_to_spike_train(sps, False)) spikes = [] channels = range(len(signals)) channel_indices = [] for s in signals: if not s.recordingchannel: channel_indices.append(-1) else: channel_indices.append(s.recordingchannel.index) # Heuristic: If multiple channels have the same index, use channel order # as index for spike waveforms nonindices = max(0, channel_indices.count(-1) - 1) if len(set(channel_indices)) != len(channel_indices) - nonindices: channel_indices = range(len(signals)) progress.set_ticks((len(spike_trains) + len(spikes) + 1) * len(channels)) offset = 0 * signals[0].units if use_subplots: plot = None for c in channels: pW = BaseCurveWidget(win) plot = pW.plot if subplot_names: if signals[c].name: win.set_plot_title(plot, signals[c].name) elif signals[c].recordingchannel: if signals[c].recordingchannel.name: win.set_plot_title( plot, signals[c].recordingchannel.name) sample = 
(1 / signals[c].sampling_rate).simplified x = (sp.arange(signals[c].shape[0])) * sample + signals[c].t_start x.units = time_unit helper.add_epochs(plot, epochs, x.units) if y_unit is not None: plot.add_item(make.curve(x, signals[c].rescale(y_unit))) else: plot.add_item(make.curve(x, signals[c])) helper.add_events(plot, events, x.units) _add_spike_waveforms( plot, spikes, x.units, channel_indices[c], offset, progress) for train in spike_trains: color = helper.get_object_color(train.unit) helper.add_spikes(plot, train, color, units=x.units) progress.step() win.add_plot_widget(pW, c) plot.set_axis_unit( BasePlot.Y_LEFT, signals[c].dimensionality.string) progress.step() plot.set_axis_title(BasePlot.X_BOTTOM, 'Time') plot.set_axis_unit(BasePlot.X_BOTTOM, x.dimensionality.string) else: channels.reverse() pW = BaseCurveWidget(win) plot = pW.plot helper.add_epochs(plot, epochs, time_unit) # Find plot y offset max_offset = 0 * signals[0].units for i, c in enumerate(channels[1:], 1): cur_offset = signals[channels[i - 1]].max() - signals[c].min() if cur_offset > max_offset: max_offset = cur_offset offset -= signals[channels[0]].min() for c in channels: sample = (1 / signals[c].sampling_rate).simplified x = (sp.arange(signals[c].shape[0])) * sample + signals[c].t_start x.units = time_unit if y_unit is not None: plot.add_item( make.curve(x, (signals[c] + offset).rescale(y_unit))) else: plot.add_item(make.curve(x, signals[c] + offset)) _add_spike_waveforms( plot, spikes, x.units, channel_indices[c], offset, progress) offset += max_offset progress.step() helper.add_events(plot, events, x.units) for train in spike_trains: color = helper.get_object_color(train.unit) helper.add_spikes(plot, train, color, units=x.units) progress.step() win.add_plot_widget(pW, 0) plot.set_axis_title(BasePlot.X_BOTTOM, 'Time') plot.set_axis_unit(BasePlot.X_BOTTOM, x.dimensionality.string) plot.set_axis_unit(BasePlot.Y_LEFT, signals[0].dimensionality.string) win.add_custom_curve_tools() units = 
set([s.unit for s in spike_trains]) units = units.union([s.unit for s in spikes]) progress.done() helper.make_window_legend(win, units, False) win.show() if use_subplots: win.add_x_synchronization_option(True, channels) win.add_y_synchronization_option(False, channels) return win def _add_spike_waveforms(plot, spikes, x_units, channel, offset, progress): for spike in spikes: if spike.waveform is None or not spike.sampling_rate: continue if channel < 0 or spike.waveform.shape[1] <= channel: continue color = helper.get_object_color(spike.unit) if spike.left_sweep: lsweep = spike.left_sweep else: lsweep = 0.0 * pq.ms start = (spike.time - lsweep).rescale(x_units) stop = (spike.waveform.shape[0] / spike.sampling_rate + spike.time - lsweep).rescale(x_units) spike_x = sp.linspace( start, stop, spike.waveform.shape[0], endpoint=False) * x_units plot.add_item( make.curve(spike_x, spike.waveform[:, channel] + offset, color=color, linewidth=2)) progress.step() spykeutils-0.4.3/spykeutils/plot/dialog.py0000755000175000017500000005164612664623646017121 0ustar robrob""" Contains a helper class for creating windows containing guiqwt plots. 
""" from PyQt4 import QtGui from PyQt4.QtCore import Qt from PyQt4.QtGui import (QDialog, QGridLayout, QToolBar, QHBoxLayout, QVBoxLayout, QFrame, QWidget) try: from PyQt4 import QtCore _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: _fromUtf8 = lambda s: s from PyQt4.QtGui import QColor, QScrollArea, QWidget, QFrame from guiqwt.baseplot import BasePlot from guiqwt.curve import CurvePlot from guiqwt.image import ImagePlot from guiqwt.plot import PlotManager from guiqwt.shapes import Marker from guiqwt.curve import CurveItem from guiqwt.tools import (SelectTool, RectZoomTool, BasePlotMenuTool, DeleteItemTool, ItemListPanelTool, AntiAliasingTool, AxisScaleTool, DisplayCoordsTool, ExportItemDataTool, ItemCenterTool, SignalStatsTool, ColormapTool, ReverseYAxisTool, AspectRatioTool, ContrastPanelTool, XCSPanelTool, YCSPanelTool, CrossSectionTool, AverageCrossSectionTool, SaveAsTool, PrintTool, CopyToClipboardTool) try: # guiqwt<3.0 from guiqwt.signals import SIG_PLOT_AXIS_CHANGED old_guiqwt = True except ImportError: old_guiqwt = False from guidata.configtools import get_icon import icons_rc import guiqwt_tools # Monkeypatch curve and image plot so synchronizing axes works with all tools def fixed_do_zoom_rect_view(self, *args, **kwargs): CurvePlot.old_do_zoom_rect_view(self, *args, **kwargs) if old_guiqwt: self.emit(SIG_PLOT_AXIS_CHANGED, self) else: self.SIG_PLOT_AXIS_CHANGED.emit(self) CurvePlot.old_do_zoom_rect_view = CurvePlot.do_zoom_rect_view CurvePlot.do_zoom_rect_view = fixed_do_zoom_rect_view def fixed_do_autoscale(self, *args, **kwargs): CurvePlot.old_do_autoscale(self, *args, **kwargs) if not isinstance(self, ImagePlot): if old_guiqwt: self.emit(SIG_PLOT_AXIS_CHANGED, self) else: self.SIG_PLOT_AXIS_CHANGED.emit(self) CurvePlot.old_do_autoscale = CurvePlot.do_autoscale CurvePlot.do_autoscale = fixed_do_autoscale def fixed_do_autoscale_image(self, *args, **kwargs): ImagePlot.old_do_autoscale(self, *args, **kwargs) if old_guiqwt: 
self.emit(SIG_PLOT_AXIS_CHANGED, self) else: self.SIG_PLOT_AXIS_CHANGED.emit(self) ImagePlot.old_do_autoscale = ImagePlot.do_autoscale ImagePlot.do_autoscale = fixed_do_autoscale_image class PlotDialog(QDialog, PlotManager): """ Implements a dialog to which an arbitrary number of plots can be added. This class implements a `QDialog` with a number of plots on it. The axes of the plots can be arbitrarily synchronized and option checkboxes can be added which provide callbacks when the checkbox state changes. :param str wintitle: Title of the window. :param bool major_grid: Show major grid in plots. :param bool minor_grid: Show minor grid in plots. :param bool toolbar: Show toolbar. :param parent: Parent window. :param panels: A list of guiqwt panels to be added to the window. :param int min_plot_width: Default minimum width for plots. :param int min_plot_height: Default minimum height for plots. """ def __init__(self, wintitle='Plot window', major_grid=True, minor_grid=False, toolbar=True, parent=None, panels=None, min_plot_width=100, min_plot_height=75): QDialog.__init__(self, parent) self.setWindowFlags(Qt.Window) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(_fromUtf8(':/Application/Main')), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.setWindowIcon(icon) self.major_grid = major_grid self.minor_grid = minor_grid self.min_plot_width = min_plot_width self.min_plot_height = min_plot_height # WidgetMixin copy PlotManager.__init__(self, main=self) self.main_layout = QVBoxLayout(self) self.color_layout = QHBoxLayout() self.plot_layout = QGridLayout() self.plot_layout.setMargin(0) self.plot_scroll_widget = QWidget() self.plot_scroll_area = QScrollArea() self.plot_scroll_area.setFrameShape(QFrame.NoFrame) self.plot_scroll_area.setWidgetResizable(True) self.option_layout = QHBoxLayout() self.plot_widget = None if panels is not None: for panel in panels: self.add_panel(panel) self.toolbar = QToolBar('Tools') if not toolbar: self.toolbar.hide() # Configuring widget layout 
self._setup_widget_properties(wintitle=wintitle, icon=icon) self._setup_widget_layout() # Options self.option_callbacks = {} self.legend = None self.axis_syncplots = {} def _setup_widget_properties(self, wintitle, icon): self.setWindowTitle(wintitle) if isinstance(icon, basestring): icon = get_icon(icon) self.setWindowIcon(icon) self.setMinimumSize(320, 240) self.resize(720, 540) def _setup_widget_layout(self): self.main_layout.addWidget(self.toolbar) self.main_layout.addLayout(self.color_layout) self.main_layout.addWidget(self.plot_scroll_area) self.plot_scroll_area.setWidget(self.plot_scroll_widget) self.plot_scroll_widget.setLayout(self.plot_layout) self.main_layout.addLayout(self.option_layout) self.setLayout(self.main_layout) def add_custom_curve_tools(self, antialiasing=True, activate_zoom=True, signal_stats=False): """ Adds typically needed curve tools to the window. :param bool antialiasing: Determines if the antialiasing tool is added. :param bool activate_zoom: Determines if the zoom tool is activated initially (otherwise, the selection tool will be activated). :param bool signal_stats: Determines if the signal stats tool is available. 
""" self.add_toolbar(self.toolbar) t = self.add_tool(SelectTool) if not activate_zoom: self.set_default_tool(t) self.add_tool(BasePlotMenuTool, "item") self.add_tool(ExportItemDataTool) try: # Old versions of guiqwt and spyderlib do not support this import spyderlib.widgets.objecteditor from guiqwt.tools import EditItemDataTool self.add_tool(EditItemDataTool) except ImportError: pass self.add_tool(ItemCenterTool) self.add_tool(DeleteItemTool) self.add_separator_tool() t = self.add_tool(RectZoomTool) if activate_zoom: self.set_default_tool(t) self.add_tool(guiqwt_tools.HomeTool) self.add_tool(guiqwt_tools.PanTool) self.add_separator_tool() self.add_tool(BasePlotMenuTool, "grid") self.add_tool(BasePlotMenuTool, "axes") self.add_tool(DisplayCoordsTool) if self.get_itemlist_panel(): self.add_tool(ItemListPanelTool) if signal_stats: self.add_separator_tool() self.add_tool(SignalStatsTool) if antialiasing: self.add_tool(AntiAliasingTool) self.add_tool(AxisScaleTool) self.add_separator_tool() self.add_tool(SaveAsTool) self.add_tool(CopyToClipboardTool) self.add_tool(PrintTool) self.add_tool(guiqwt_tools.HelpTool) self.add_separator_tool() self.get_default_tool().activate() def add_custom_image_tools(self, activate_zoom=True): """ Adds typically needed image tools to the window. 
""" self.add_toolbar(self.toolbar) t = self.add_tool(SelectTool) if not activate_zoom: self.set_default_tool(t) self.add_tool(BasePlotMenuTool, "item") self.add_tool(ExportItemDataTool) try: # Old versions of guiqwt and spyderlib do not support this import spyderlib.widgets.objecteditor from guiqwt.tools import EditItemDataTool self.add_tool(EditItemDataTool) except ImportError: pass self.add_tool(ItemCenterTool) self.add_tool(DeleteItemTool) self.add_separator_tool() t = self.add_tool(RectZoomTool) if activate_zoom: self.set_default_tool(t) self.add_tool(guiqwt_tools.HomeTool) self.add_tool(guiqwt_tools.PanTool) self.add_separator_tool() self.add_tool(BasePlotMenuTool, "grid") self.add_tool(BasePlotMenuTool, "axes") self.add_tool(DisplayCoordsTool) if self.get_itemlist_panel(): self.add_tool(ItemListPanelTool) self.add_separator_tool() self.add_tool(ColormapTool) self.add_tool(ReverseYAxisTool) self.add_tool(AspectRatioTool) if self.get_contrast_panel(): self.add_tool(ContrastPanelTool) if self.get_xcs_panel() and self.get_ycs_panel(): self.add_tool(XCSPanelTool) self.add_tool(YCSPanelTool) self.add_tool(CrossSectionTool) self.add_tool(AverageCrossSectionTool) self.add_separator_tool() self.add_tool(SaveAsTool) self.add_tool(CopyToClipboardTool) self.add_tool(PrintTool) self.add_tool(guiqwt_tools.HelpTool) self.add_separator_tool() self.get_default_tool().activate() def add_option(self, name, change_callback, active=False): """ Add an option (using a checkbox) to the window. :param str name: The name of the option. :param func change_callback: A function accepting the new state as a parameter. The function will be called whenever the state of the option changes. :param bool active: Determines if the option is activated initially. 
""" checkBox = QtGui.QCheckBox(self) checkBox.setChecked(active) checkBox.setText(name) checkBox.stateChanged.connect(self._option_callback) self.option_callbacks[checkBox] = change_callback self.option_layout.addWidget(checkBox) def add_x_synchronization_option(self, active, ids=None): """ Offer an option for X axes synchronization. This method should be called after show(), so that a proper initial synchronization can be performed. :param bool active: Determines whether the axes are synchronized initially. :param sequence ids: List of plot ids to synchronize. """ self.axis_syncplots[BasePlot.X_BOTTOM] = ids if active and ids: self.synchronize_axis(BasePlot.X_BOTTOM) self.add_option('Synchronize X Axes', PlotDialog._synchronization_option_x, active) def add_y_synchronization_option(self, active, ids=None): """ Offer an option for Y axes synchronization. This method should be called after show(), so that a proper initial synchronization can be performed. :param bool active: Determines whether the axes are synchronized initially :param sequence ids: List of plot ids to synchronize. """ self.axis_syncplots[BasePlot.Y_LEFT] = ids if active and ids: self.synchronize_axis(BasePlot.Y_LEFT) self.add_option('Synchronize Y Axes', PlotDialog._synchronization_option_y, active) def _synchronization_option_x(self, state): """ Callback for x-axis synchronization """ if state: self.synchronize_axis(BasePlot.X_BOTTOM) else: self.unsynchronize_axis(BasePlot.X_BOTTOM) def _synchronization_option_y(self, state): """ Callback for y-axis synchronization """ if state: self.synchronize_axis(BasePlot.Y_LEFT) else: self.unsynchronize_axis(BasePlot.Y_LEFT) def replace_colors(self, replace_list): """ Replace colors of items in all plots. This can be useful when changing the background color to black and black items should be drawn in white: ``replace_colors([('#000000', '#ffffff']))`` :param list replace_list: A list of tuples of Qt color names. 
The first color in each tuple is replaced by the second color. """ for plot in self.plots.itervalues(): for i in plot.get_items(): if isinstance(i, CurveItem): pen = i.pen() elif isinstance(i, Marker): pen = i.linePen() else: continue for color in replace_list: c1 = QColor(color[0]) c2 = QColor(color[1]) if pen.color() != c1: continue pen.setColor(c2) break if isinstance(i, CurveItem): i.setPen(pen) elif isinstance(i, Marker): i.setLinePen(pen) plot.replot() def set_background_color(self, color): """ Set the background color for all plots. :param str color: A Qt color name (e.g. '#ff0000') """ for p in self.plots.itervalues(): p.setCanvasBackground(QColor(color)) p.replot() def add_unit_color(self, color, name='Unit color:'): """ Create a small legend on top of the window with only one entry. :param str color: A Qt color name (e.g. '#ff0000') :param str name: The name of the legend item. It will be displayed on the left of the color. """ label = QtGui.QLabel(self) label.setText(name) label.setAlignment(Qt.AlignRight) self.color_layout.addWidget(label) label = QtGui.QLabel(self) label.setStyleSheet('background-color:' + str(color)) label.setFrameShape(QFrame.StyledPanel) label.setMaximumWidth(80) self.color_layout.addWidget(label) def add_custom_label(self, legend_string): """ Add a label on the right of the plots :param str legend_string: An arbitrary string (which can contain newlines) to display on the right of the plots """ label = QtGui.QLabel(self) label.setText(legend_string) self.plot_layout.addWidget( label, 0, self.plot_layout.columnCount(), -1, 1) def add_color_legend(self, legend, show_option=None): """ Create a legend on the right of the plots with colors and names. :param sequence legend: List of (color, text) tuples, where `color` is a Qt color name (e.g. '#ff0000') and `text` is the corresponding text to display in the legend. 
:param bool show_option: Determines whether a toggle for the legend will be shown (if the parameter is not ``None``) and if the legend is visible initially (``True``/``False``). """ widget = QWidget(self) layout = QGridLayout(widget) widget.setLayout(layout) for l in legend: label = QtGui.QLabel(self) label.setStyleSheet('background-color:' + str(l[0])) label.setFrameShape(QFrame.StyledPanel) label.setMaximumWidth(80) label.setMaximumHeight(12) layout.addWidget(label, layout.rowCount(), 0, 1, 1) label = QtGui.QLabel(self) label.setText(l[1]) layout.addWidget(label, layout.rowCount() - 1, 1, 1, 1) self.plot_layout.addWidget( widget, 0, self.plot_layout.columnCount(), -1, 1) if show_option is not None: widget.setVisible(show_option) self.add_option( 'Show Legend Sidebar', lambda w, s: widget.setVisible(s), show_option) def add_legend_option(self, legends, active): """ Create a user option to show or hide a list of legend objects. :param sequence legends: The legend objects affected by the option. :param bool active: Determines whether the legends will be visible initially. """ self.legends = legends self._set_legend_visibility(active) self.add_option('Show legend', self._legend_callback, active) if active: self._set_legend_visibility(True) def _option_callback(self, state): self.option_callbacks[self.sender()](self, state) #noinspection PyUnusedLocal def _legend_callback(self, win, state): self._set_legend_visibility(state > 0) def _set_legend_visibility(self, visible): for p in self.plots.itervalues(): for l in self.legends: p.set_item_visible(l, visible) def add_plot_widget(self, plot_widget, plot_id, row=-1, column=0, min_plot_width=None, min_plot_height=None): """ Adds a guiqwt plot to the window. :param plot_widget: The plot to add. :type plot_widget: guiqwt plot widget :param int plot_id: The id of the new plot. :param int row: The row of the new plot. If this is -1, the new plot will be added in a new row (if `column` is 0) or in the last row. 
:param int column: The column of the new plot. :param int min_plot_width: The minimum width of this plot. If ``None``, the default minimum width for this dialog is used. :param int max_plot_height: The minimum height of this plot. If ``None``, the default minimum height for this dialog is used. """ if row == -1: if column == 0: row = self.plot_layout.rowCount() else: row = self.plot_layout.rowCount() - 1 pw = min_plot_width if pw is None: pw = self.min_plot_width ph = min_plot_height if ph is None: ph = self.min_plot_height plot_widget.setMinimumSize(pw, ph) self.plot_layout.addWidget(plot_widget, row, column) new_plot = plot_widget.plot self.add_plot(new_plot, plot_id) def synchronize_axis(self, axis, plots=None): if plots is None: if axis in self.axis_syncplots: plots = self.axis_syncplots[axis] else: plots = self.plots.keys() if len(plots) < 1: return PlotManager.synchronize_axis(self, axis, plots) # Find interval that needs to be shown in order to include all # currently shown parts in the synchronized plots plot_objects = [self.plots[p] for p in plots] lb = min((p.axisScaleDiv(axis).lowerBound() for p in plot_objects)) ub = max((p.axisScaleDiv(axis).upperBound() for p in plot_objects)) for p in plot_objects: p.setAxisScale(axis, lb, ub) p.replot() def unsynchronize_axis(self, axis, plots=None): if plots is None: if axis in self.axis_syncplots: plots = self.axis_syncplots[axis] else: plots = self.plots.keys() for plot_id in plots: if not plot_id in self.synchronized_plots: continue synclist = self.synchronized_plots[plot_id] for plot2_id in plots: if plot_id == plot2_id: continue item = (axis, plot2_id) if item in synclist: synclist.remove(item) def plot_axis_changed(self, plot): ids = [k for k, p in self.plots.iteritems() if p == plot] if len(ids) < 1: return plot_id = ids[0] if plot_id not in self.synchronized_plots: return for (axis, other_plot_id) in self.synchronized_plots[plot_id]: scalediv = plot.axisScaleDiv(axis) other = self.get_plot(other_plot_id) lb 
# NOTE(review): the following four statements are the tail of an
# axis-synchronization method of PlotDialog whose start lies outside this
# chunk; `scalediv`, `other` and `axis` are bound earlier in that method.
        = scalediv.lowerBound()
        ub = scalediv.upperBound()
        other.setAxisScale(axis, lb, ub)
        other.replot()

    def set_plot_title(self, plot, title):
        """ Set the title of a guiqwt plot and use the same font as for
        the rest of the window.

        :param plot: The plot for which the title is set.
        :param str title: The new title of the plot.
        """
        plot.setTitle(title)
        # Setting the title again through the label re-applies the text with
        # the dialog's font instead of the plot default.
        l = plot.titleLabel()
        l.setFont(self.font())
        plot.setTitle(l.text())

    def show(self):
        # Apply the dialog's grid visibility settings to all plots just
        # before the window becomes visible.
        for p in self.plots.itervalues():
            if not self.minor_grid:
                p.grid.gridparam.min_xenabled = False
                p.grid.gridparam.min_yenabled = False
            if not self.major_grid:
                p.grid.gridparam.maj_xenabled = False
                p.grid.gridparam.maj_yenabled = False
            p.grid.update_params()
        super(PlotDialog, self).show()
spykeutils-0.4.3/spykeutils/plot/spike_waveforms.py0000644000175000017500000003642212664623646021056 0ustar  robrob
import scipy as sp
import quantities as pq

from guiqwt.builder import make
from guiqwt.baseplot import BasePlot
from guiqwt.plot import BaseCurveWidget
from PyQt4 import Qt

from ..progress_indicator import ProgressIndicator
from .. import SpykeException
from dialog import PlotDialog
import helper


@helper.needs_qt
def spikes(spikes, axes_style, strong=None, anti_alias=False, fade=1.0,
           subplot_layout=0, time_unit=pq.ms, progress=None):
    """ Create a plot dialog with spike waveforms.

    Assumes that all spikes have waveforms with the same number of
    channels.

    :param dict spikes: A dictionary of :class:`neo.core.Spike` lists.
    :param int axes_style: Plotting mode. The following values are possible:

        - 1: Show each channel in a seperate plot, split vertically.
        - 2: Show each channel in a separate plot, split horizontally.
        - 3: Show each key of ``spikes`` in a separate plot, channels are
          split vertically.
        - 4: Show each key of ``spikes`` in a separate plot, channels are
          split horizontally.
        - 5: Show all channels in the same plot, split vertically.
        - 6: Show all channels in the same plot, split horizontally.
    :param dict strong: A dictionary of :class:`neo.core.Spike` lists.
        When given, these spikes are shown as thick lines on top of the
        regular spikes in the respective plots.
    :param bool anti_alias: Determines whether an antialiased plot is
        created.
    :param float fade: Vary transparency by segment. For values > 0, the
        first spike for each unit is displayed with the corresponding
        alpha value and alpha is linearly interpolated until it is 1 for
        the last spike. For values < 0, alpha is 1 for the first spike
        and ``fade`` for the last spike. Does not affect spikes from
        ``strong``.
    :param int subplot_layout: The way subplots are arranged on the
        window:

        - 0: Linear - horizontally or vertically, depending on
          ``axis_style``.
        - 1: Square - this layout tries to have the same number of plots
          per row and per column.
    :param Quantity time_unit: Unit of X-Axis.
    :param progress: Set this parameter to report progress.
    :type progress: :class:`spykeutils.progress_indicator.ProgressIndicator`
    """
    if (not spikes or sum((len(l) for l in spikes.itervalues())) < 1) and \
            (not strong or sum((len(l) for l in strong.itervalues())) < 1):
        raise SpykeException('No spikes for spike waveform plot!')
    if not progress:
        progress = ProgressIndicator()
    if strong is None:
        strong = {}

    # One progress tick per plotted spike (regular and strong).
    progress.begin('Creating waveform plot')
    progress.set_ticks(
        sum((len(l) for l in spikes.itervalues())) +
        sum((len(l) for l in strong.itervalues())))

    win_title = 'Spike waveforms'
    win = PlotDialog(toolbar=True, wintitle=win_title)

    # Use the first available spike as reference for units and channel count.
    try:
        ref_spike = spikes[spikes.keys()[0]][0]
    except IndexError:
        ref_spike = strong[strong.keys()[0]][0]
    if ref_spike.waveform is None:
        raise SpykeException(
            'Cannot create waveform plot: At least one spike '
            'has no waveform or sampling rate!')
    ref_units = ref_spike.waveform.units
    channels = range(ref_spike.waveform.shape[1])

    # Keys from spikes and strong without duplicates in original order
    seen = set()
    indices = [k for k in spikes.keys() + strong.keys()
               if k not in seen and not seen.add(k)]

    if axes_style <= 2:  # Separate channel plots
        for c in channels:
            pw = BaseCurveWidget(win)
            plot = pw.plot
            plot.set_antialiasing(anti_alias)
            for u in spikes:
                color = helper.get_object_color(u)
                qcol = Qt.QColor(color)

                # Linear alpha ramp across the spikes of this unit; the sign
                # of ``fade`` selects the ramp direction (see docstring).
                alpha = fade if fade > 0.0 else 1.0
                alpha_step = 1.0 - fade if fade > 0.0 else -1.0 - fade
                alpha_step /= len(spikes[u])
                if len(spikes[u]) == 1:
                    alpha = 1.0

                for s in spikes[u]:
                    if s.waveform is None or s.sampling_rate is None:
                        raise SpykeException(
                            'Cannot create waveform plot: '
                            'At least one spike has no '
                            'waveform or sampling rate!')
                    x = (sp.arange(s.waveform.shape[0]) /
                         s.sampling_rate).rescale(time_unit)
                    curve = make.curve(
                        x, s.waveform[:, c].rescale(ref_units),
                        title=u.name, color=color)
                    qcol.setAlphaF(alpha)
                    curve.setPen(Qt.QPen(qcol))
                    alpha += alpha_step
                    plot.add_item(curve)
                    progress.step()
            for u in strong:
                color = helper.get_object_color(u)
                for s in strong[u]:
                    x = (sp.arange(s.waveform.shape[0]) /
                         s.sampling_rate).rescale(time_unit)
                    # Black outline behind the colored curve makes the
                    # "strong" spikes stand out.
                    outline = make.curve(
                        x, s.waveform[:, c].rescale(ref_units),
                        color='#000000', linewidth=4)
                    curve = make.curve(
                        x, s.waveform[:, c].rescale(ref_units),
                        color=color, linewidth=2)
                    plot.add_item(outline)
                    plot.add_item(curve)
                    progress.step()

            _add_plot(plot, pw, win, c, len(channels), subplot_layout,
                      axes_style, time_unit, ref_units)
        helper.make_window_legend(win, indices, True)
    elif axes_style > 4:  # Only one plot needed
        pw = BaseCurveWidget(win)
        plot = pw.plot
        plot.set_antialiasing(anti_alias)

        if axes_style == 6:  # Horizontal split
            l = _split_plot_hor(channels, spikes, strong, fade, ref_units,
                                time_unit, progress, plot)
            plot.set_axis_title(BasePlot.X_BOTTOM, 'Time')
            plot.set_axis_unit(
                BasePlot.X_BOTTOM, time_unit.dimensionality.string)
        else:  # Vertical split
            channels.reverse()
            max_offset = _find_y_offset(channels, spikes, strong, ref_units)
            l = _split_plot_ver(channels, spikes, strong, fade, ref_units,
                                time_unit, progress, max_offset, plot)
            plot.set_axis_title(BasePlot.Y_LEFT, 'Voltage')
            plot.set_axis_unit(
                BasePlot.Y_LEFT, ref_units.dimensionality.string)
        win.add_plot_widget(pw, 0)
        win.add_legend_option([l], True)
    else:  # One plot per unit
        if axes_style == 3:
            channels.reverse()
            max_offset = _find_y_offset(channels, spikes, strong, ref_units)
        for i, u in enumerate(indices):
            pw = BaseCurveWidget(win)
            plot = pw.plot
            plot.set_antialiasing(anti_alias)
            # Restrict the helper calls to the spikes of this unit only.
            spk = {}
            if u in spikes:
                spk[u] = spikes[u]
            st = {}
            if u in strong:
                st[u] = strong[u]
            if axes_style == 3:  # Vertical split
                _split_plot_ver(channels, spk, st, fade, ref_units,
                                time_unit, progress, max_offset, plot)
            else:  # Horizontal split
                _split_plot_hor(channels, spk, st, fade, ref_units,
                                time_unit, progress, plot)

            _add_plot(plot, pw, win, i, len(indices), subplot_layout,
                      axes_style, time_unit, ref_units)

    win.add_custom_curve_tools()
    progress.done()
    win.show()

    # Offer axis synchronization whenever more than one subplot exists.
    if axes_style <= 2:
        if len(channels) > 1:
            win.add_x_synchronization_option(True, channels)
            win.add_y_synchronization_option(True, channels)
    elif axes_style <= 4:
        if len(spikes) > 1:
            win.add_x_synchronization_option(True, range(len(spikes)))
            win.add_y_synchronization_option(True, range(len(spikes)))

    return win


def _add_plot(plot, widget, win, index, total, subplot_layout, axes_style,
              time_unit, ref_units):
    """ Add a plot to the window in the right position with the correct
    axis labels.
    """
    if subplot_layout == 0:
        # Linear layout: label the Y axis only on the first plot (or on all
        # plots for styles 1 and 4) and the X axis only on the last one.
        if axes_style == 1 or axes_style == 4 or index == 0:
            plot.set_axis_title(BasePlot.Y_LEFT, 'Voltage')
            plot.set_axis_unit(
                BasePlot.Y_LEFT, ref_units.dimensionality.string)
        if axes_style == 2 or axes_style == 3 or index == total - 1:
            plot.set_axis_title(BasePlot.X_BOTTOM, 'Time')
            plot.set_axis_unit(
                BasePlot.X_BOTTOM, time_unit.dimensionality.string)
        if axes_style == 1 or axes_style == 4:
            win.add_plot_widget(widget, index)
        else:
            win.add_plot_widget(widget, index, 0, index)
    else:
        # Square layout: ceil(sqrt(total)) columns; label the leftmost
        # column and the bottom row only.
        size = int(sp.sqrt(total) + 0.99)
        if index % size == 0:
            plot.set_axis_title(BasePlot.Y_LEFT, 'Voltage')
            plot.set_axis_unit(
                BasePlot.Y_LEFT, ref_units.dimensionality.string)
        if index >= total - size:
            plot.set_axis_title(BasePlot.X_BOTTOM, 'Time')
            plot.set_axis_unit(
                BasePlot.X_BOTTOM, time_unit.dimensionality.string)
        win.add_plot_widget(widget, index, index / size, index % size)


def _find_y_offset(channels, spikes, strong, ref_units):
    """ Find y offset needed when plotting spikes split vertically by
    channel.
    """
    # Per-channel extrema over all spikes.
    # NOTE(review): when both ``spikes`` and ``strong`` are non-empty,
    # ``max_y``/``min_y`` get 2 * len(channels) entries but the loop below
    # only inspects the first len(channels) — confirm this is intended.
    max_y = []
    min_y = []
    if spikes:
        for i, c in enumerate(channels):
            max_y.append(
                max(max(s.waveform[:, c].max() for s in d)
                    for d in spikes.itervalues()))
            min_y.append(
                min(min(s.waveform[:, c].min() for s in d)
                    for d in spikes.itervalues()))
    if strong:
        for i, c in enumerate(channels):
            max_y.append(
                max(max(s.waveform[:, c].max() for s in d)
                    for d in strong.itervalues()))
            min_y.append(
                min(min(s.waveform[:, c].min() for s in d)
                    for d in strong.itervalues()))

    # The offset must be large enough that adjacent channels never overlap.
    max_offset = 0 * ref_units
    for i in range(1, len(channels)):
        offset = max_y[i - 1] - min_y[i]
        if offset > max_offset:
            max_offset = offset

    return max_offset


def _split_plot_ver(channels, spikes, strong, fade, ref_units, time_unit,
                    progress, max_offset, plot):
    """ Fill a plot with spikes vertically split by channel. Returns
    legend.
    """
    offset = 0 * ref_units
    for c in channels:
        for u in spikes:
            color = helper.get_object_color(u)
            qcol = Qt.QColor(color)

            # Linear alpha ramp across this unit's spikes (see ``fade``).
            alpha = fade if fade > 0.0 else 1.0
            alpha_step = 1.0 - fade if fade > 0.0 else -1.0 - fade
            alpha_step /= len(spikes[u])
            if len(spikes[u]) == 1:
                alpha = 1.0

            for s in spikes[u]:
                if s.waveform is None or s.sampling_rate is None:
                    raise SpykeException('Cannot create waveform plot: '
                                         'At least one spike has no '
                                         'waveform or sampling rate!')
                x = (sp.arange(s.waveform.shape[0]) /
                     s.sampling_rate).rescale(time_unit)
                curve = make.curve(
                    x, s.waveform[:, c].rescale(ref_units) + offset,
                    u.name, color=color)
                qcol.setAlphaF(alpha)
                curve.setPen(Qt.QPen(qcol))
                alpha += alpha_step
                plot.add_item(curve)
                progress.step()
        for u in strong:
            color = helper.get_object_color(u)
            for s in strong[u]:
                x = (sp.arange(s.waveform.shape[0]) /
                     s.sampling_rate).rescale(time_unit)
                # Thick black outline under the colored curve.
                outline = make.curve(
                    x, s.waveform[:, c].rescale(ref_units) + offset,
                    color='#000000', linewidth=4)
                curve = make.curve(
                    x, s.waveform[:, c].rescale(ref_units) + offset,
                    color=color, linewidth=2)
                plot.add_item(outline)
                plot.add_item(curve)
                progress.step()
        # Shift the next channel up by the precomputed per-channel offset.
        offset += max_offset

    l = _add_legend(plot, spikes, strong)
    return l


def _split_plot_hor(channels, spikes, strong, fade, ref_units, time_unit,
                    progress, plot):
    """ Fill a plot with spikes horizontally split by channel. Returns
    legend.
    """
    offset = 0 * time_unit
    for c in channels:
        # Widest waveform of this channel; determines the next x offset.
        x_off = 0 * time_unit
        for u in spikes:
            color = helper.get_object_color(u)
            qcol = Qt.QColor(color)

            # Linear alpha ramp across this unit's spikes (see ``fade``).
            alpha = fade if fade > 0.0 else 1.0
            alpha_step = 1.0 - fade if fade > 0.0 else -1.0 - fade
            alpha_step /= len(spikes[u])
            if len(spikes[u]) == 1:
                alpha = 1.0

            for s in spikes[u]:
                if s.waveform is None or s.sampling_rate is None:
                    raise SpykeException(
                        'Cannot create waveform plot: '
                        'At least one spike has no '
                        'waveform or sampling rate!')
                x = (sp.arange(s.waveform.shape[0]) /
                     s.sampling_rate).rescale(time_unit)
                x_off = max(x_off, x[-1])
                curve = make.curve(
                    x + offset, s.waveform[:, c].rescale(ref_units),
                    u.name, color=color)
                qcol.setAlphaF(alpha)
                curve.setPen(Qt.QPen(qcol))
                alpha += alpha_step
                plot.add_item(curve)
                progress.step()
        for u in strong:
            color = helper.get_object_color(u)
            for s in strong[u]:
                x = (sp.arange(s.waveform.shape[0]) /
                     s.sampling_rate).rescale(time_unit)
                x_off = max(x_off, x[-1])
                # Thick black outline under the colored curve.
                outline = make.curve(
                    x + offset, s.waveform[:, c].rescale(ref_units),
                    color='#000000', linewidth=4)
                curve = make.curve(
                    x + offset, s.waveform[:, c].rescale(ref_units),
                    color=color, linewidth=2)
                plot.add_item(outline)
                plot.add_item(curve)
                progress.step()
        offset += x_off
        # Vertical separator marker between channels (not after the last).
        if c != channels[-1]:
            plot.add_item(
                make.marker((offset, 0), lambda x, y: '', movable=False,
                            markerstyle='|', color='k', linestyle='-',
                            linewidth=1))

    l = _add_legend(plot, spikes, strong)
    return l


def _add_legend(plot, spikes, strong):
    """ Add a legend with one full-opacity entry per unit to the plot
    and return it.
    """
    # Keys from spikes and strong without duplicates in original order
    seen = set()
    indices = [k for k in spikes.keys() + strong.keys()
               if k not in seen and not seen.add(k)]

    # Invisible single-point curves serve as legend entries.
    legend_items = []
    for u in indices:
        legend_curve = make.curve(
            sp.array([0]), sp.array([0]), u.name,
            color=helper.get_object_color(u), linewidth=2)
        legend_items.append(legend_curve)
        plot.add_item(legend_curve)

    l = make.legend(restrict_items=legend_items)
    plot.add_item(l)
    return l
spykeutils-0.4.3/spykeutils/plot/rasterplot.py0000644000175000017500000000417212664623646020046 0ustar  robrob
from guiqwt.builder import make
from guiqwt.baseplot import BasePlot
from guiqwt.plot import BaseCurveWidget
import quantities as pq

from .. import SpykeException
from dialog import PlotDialog
import helper


@helper.needs_qt
def raster(trains, time_unit=pq.ms, show_lines=True, events=None,
           epochs=None):
    """ Create a new plotting window with a rasterplot of spiketrains.

    :param dict trains: Dictionary of spike trains indexed by a
        Neo object (Unit or Segment).
    :param Quantity time_unit: Unit of X-Axis.
    :param bool show_lines: Determines if a horizontal line will be shown
        for each spike train.
    :param sequence events: A sequence of neo `Event` objects that will
        be marked on the plot.
    :param sequence epochs: A sequence of epoch objects that will be
        marked on the plot (passed to ``helper.add_epochs``).
    """
    if not trains:
        raise SpykeException('No spike trains for rasterplot')
    if not time_unit:
        time_unit = pq.ms

    win_title = 'Spike Trains'
    win = PlotDialog(toolbar=True, wintitle=win_title, major_grid=False)
    pW = BaseCurveWidget(win)
    plot = pW.plot

    if events is None:
        events = []
    if epochs is None:
        epochs = []

    # Trains are stacked top to bottom, one integer y position each.
    offset = len(trains)
    legend_items = []
    for u, t in trains.iteritems():
        color = helper.get_object_color(u)
        train = helper.add_spikes(
            plot, t, color, 2, 21, offset, u.name, time_unit)

        if u.name:
            legend_items.append(train)
        if show_lines:
            plot.add_item(make.curve(
                [t.t_start.rescale(time_unit), t.t_stop.rescale(time_unit)],
                [offset, offset], color='k'))
        offset -= 1

    helper.add_epochs(plot, epochs, time_unit)
    helper.add_events(plot, events, time_unit)

    plot.set_axis_title(BasePlot.X_BOTTOM, 'Time')
    plot.set_axis_unit(BasePlot.X_BOTTOM, time_unit.dimensionality.string)

    win.add_plot_widget(pW, 0)

    legend = make.legend(restrict_items=legend_items)
    plot.add_item(legend)
    win.add_legend_option([legend], True)

    if len(trains) > 1:
        plot.set_axis_limits(BasePlot.Y_LEFT, 0.5, len(trains) + 0.5)

    win.add_custom_curve_tools()
    win.show()

    return
win
spykeutils-0.4.3/spykeutils/plot/interspike_intervals.py0000644000175000017500000001070112664623646022106 0ustar  robrob
import scipy as sp
from guiqwt.builder import make
from guiqwt.baseplot import BasePlot
from guiqwt.plot import BaseCurveWidget
import quantities as pq

from .. import SpykeException
from dialog import PlotDialog
import helper


@helper.needs_qt
def isi(trains, bin_size, cut_off, bar_plot=False, time_unit=pq.ms):
    """ Create a plot dialog with an interspike interval histogram.

    :param dict trains: Dictionary with lists of spike trains indexed by
        units for which to display ISI histograms
    :param bin_size: Bin size (time)
    :type bin_size: Quantity scalar
    :param cut_off: End of histogram (time)
    :type cut_off: Quantity scalar
    :param bool bar_plot: If ``True``, create a bar ISI histogram
        for each index in ``trains``. Else, create a line ISI
        histogram.
    :param Quantity time_unit: Unit of X-Axis.
    """
    if not trains:
        raise SpykeException('No spike trains for ISI histogram')

    win_title = 'ISI Histogram | Bin size: ' + str(bin_size)
    win = PlotDialog(toolbar=True, wintitle=win_title, min_plot_width=150,
                     min_plot_height=100)
    bin_size = bin_size.rescale(time_unit)
    cut_off = cut_off.rescale(time_unit)
    # Shared bin edges from 0 to the cut-off for all histograms.
    bins = sp.arange(0 * time_unit, cut_off, bin_size) * time_unit

    legends = []
    if bar_plot:
        # One step-style plot per dictionary entry, arranged in a grid.
        ind = 0
        columns = int(sp.sqrt(len(trains)))
        for i, train_list in trains.iteritems():
            pW = BaseCurveWidget(win)
            plot = pW.plot

            # Collect all interspike intervals of this entry's trains.
            intervals = []
            for t in train_list:
                t = t.rescale(time_unit)
                sTrain = sp.asarray(t)
                sTrain.sort()
                intervals.extend(sp.diff(sTrain))

            (isi, bins) = sp.histogram(intervals, bins)

            if i and hasattr(i, 'name') and i.name:
                name = i.name
            else:
                name = 'Unknown'

            # Duplicate the first count so the step curve starts at the
            # left edge of the first bin.
            show_isi = list(isi)
            show_isi.insert(0, show_isi[0])
            curve = make.curve(
                bins, show_isi, name, color='k',
                curvestyle="Steps", shade=1.0)
            plot.add_item(curve)

            # Create legend
            color = helper.get_object_color(i)
            color_curve = make.curve(
                [], [], name, color, 'NoPen', linewidth=1, marker='Rect',
                markerfacecolor=color, markeredgecolor=color)
            plot.add_item(color_curve)
            legends.append(make.legend(restrict_items=[color_curve]))
            plot.add_item(legends[-1])

            # Prepare plot
            plot.set_antialiasing(False)
            scale = plot.axisScaleDiv(BasePlot.Y_LEFT)
            plot.setAxisScale(BasePlot.Y_LEFT, 0, scale.upperBound())
            # Label only the leftmost column and the bottom row.
            if ind % columns == 0:
                plot.set_axis_title(BasePlot.Y_LEFT, 'Number of intervals')
            if ind >= len(trains) - columns:
                plot.set_axis_title(BasePlot.X_BOTTOM, 'Interval length')
                plot.set_axis_unit(
                    BasePlot.X_BOTTOM, time_unit.dimensionality.string)
            win.add_plot_widget(pW, ind, column=ind % columns)
            ind += 1
    else:
        # Single plot with one line per dictionary entry.
        pW = BaseCurveWidget(win)
        plot = pW.plot
        legend_items = []
        for i, train_list in trains.iteritems():
            intervals = []
            for t in train_list:
                t = t.rescale(time_unit)
                sTrain = sp.asarray(t)
                sTrain.sort()
                intervals.extend(sp.diff(sTrain))

            (isi, bins) = sp.histogram(intervals, bins)

            if i and hasattr(i, 'name') and i.name:
                name = i.name
            else:
                name = 'Unknown'

            color = helper.get_object_color(i)
            curve = make.curve(bins, isi, name, color=color)
            legend_items.append(curve)
            plot.add_item(curve)

        win.add_plot_widget(pW, 0)
        legends.append(make.legend(restrict_items=legend_items))
        plot.add_item(legends[-1])
        plot.set_antialiasing(True)
        plot.set_axis_title(BasePlot.Y_LEFT, 'Number of intervals')
        plot.set_axis_title(BasePlot.X_BOTTOM, 'Interval length')
        plot.set_axis_unit(BasePlot.X_BOTTOM, time_unit.dimensionality.string)

    win.add_custom_curve_tools()
    win.add_legend_option(legends, True)
    win.show()

    if bar_plot and len(trains) > 1:
        win.add_x_synchronization_option(True, range(len(trains)))
        win.add_y_synchronization_option(False, range(len(trains)))

    return win
spykeutils-0.4.3/spykeutils/plot/spike_amp_hist.py0000644000175000017500000000477312664623646020653 0ustar  robrob
import scipy as sp
import quantities as pq

from guiqwt.plot import BaseImageWidget
from guiqwt.builder import make

from ..progress_indicator import ProgressIndicator
from ..
import SpykeException
from ..stationarity import spike_amplitude_histogram as sah
import helper
from dialog import PlotDialog


@helper.needs_qt
def spike_amplitude_histogram(trains, num_bins, uniform_y_scale=True,
                              x_unit=pq.uV, progress=None):
    """ Create a spike amplitude histogram.

    This plot is useful to assess the drift in spike amplitude over a
    longer recording. It shows histograms (one for each `trains` entry,
    e.g. segment) of maximum and minimum spike amplitudes.

    :param list trains: A list of lists of :class:`neo.core.SpikeTrain`
        objects. Each entry of the outer list will be one point on the
        x-axis (they could correspond to segments), all amplitude
        occurences of spikes contained in the inner list will be added
        up.
    :param int num_bins: Number of bins for the histograms.
    :param bool uniform_y_scale: If True, the histogram for each channel
        will use the same bins. Otherwise, the minimum bin range is
        computed separately for each channel.
    :param Quantity x_unit: Unit of Y-Axis.
    :param progress: Set this parameter to report progress.
    :type progress: :class:`spykeutils.progress_indicator.ProgressIndicator`
    :returns: The created plot window.
    """
    if not trains:
        raise SpykeException('No spikes trains for Spike Amplitude Histogram!')
    if not progress:
        progress = ProgressIndicator()

    # The stationarity module does the actual binning; ``hist`` is indexed
    # by (bin, entry, channel), ``down``/``up`` are the per-channel ranges.
    hist, down, up = sah(trains, num_bins, uniform_y_scale, x_unit, progress)
    num_channels = len(down)
    columns = int(round(sp.sqrt(num_channels)))

    win = PlotDialog(toolbar=True, wintitle="Spike Amplitude Histogram")
    for c in xrange(num_channels):
        pW = BaseImageWidget(
            win, yreverse=False, lock_aspect_ratio=False)
        plot = pW.plot
        # Log scale (log(count + 1)) keeps rare large counts from hiding
        # the rest of the histogram.
        img = make.image(sp.log(hist[:, :, c] + 1),
                         ydata=[down[c], up[c]],
                         interpolation='nearest')
        plot.add_item(img)
        plot.set_axis_title(plot.Y_LEFT, 'Amplitude')
        plot.set_axis_unit(plot.Y_LEFT, x_unit.dimensionality.string)
        win.add_plot_widget(pW, c, column=c % columns)

    progress.done()

    win.add_custom_image_tools()
    win.add_x_synchronization_option(True, range(num_channels))
    # Y synchronization only makes sense when all channels share bins.
    win.add_y_synchronization_option(uniform_y_scale,
                                     range(num_channels))

    win.show()
    return
from PyQt4 import QtCore qt_resource_data = "\ \x00\x00\x0e\x1c\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\ \x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\ \x01\x00\x9a\x9c\x18\x00\x00\x0a\x4f\x69\x43\x43\x50\x50\x68\x6f\ \x74\x6f\x73\x68\x6f\x70\x20\x49\x43\x43\x20\x70\x72\x6f\x66\x69\ \x6c\x65\x00\x00\x78\xda\x9d\x53\x67\x54\x53\xe9\x16\x3d\xf7\xde\ \xf4\x42\x4b\x88\x80\x94\x4b\x6f\x52\x15\x08\x20\x52\x42\x8b\x80\ \x14\x91\x26\x2a\x21\x09\x10\x4a\x88\x21\xa1\xd9\x15\x51\xc1\x11\ \x45\x45\x04\x1b\xc8\xa0\x88\x03\x8e\x8e\x80\x8c\x15\x51\x2c\x0c\ \x8a\x0a\xd8\x07\xe4\x21\xa2\x8e\x83\xa3\x88\x8a\xca\xfb\xe1\x7b\ \xa3\x6b\xd6\xbc\xf7\xe6\xcd\xfe\xb5\xd7\x3e\xe7\xac\xf3\x9d\xb3\ \xcf\x07\xc0\x08\x0c\x96\x48\x33\x51\x35\x80\x0c\xa9\x42\x1e\x11\ \xe0\x83\xc7\xc4\xc6\xe1\xe4\x2e\x40\x81\x0a\x24\x70\x00\x10\x08\ \xb3\x64\x21\x73\xfd\x23\x01\x00\xf8\x7e\x3c\x3c\x2b\x22\xc0\x07\ \xbe\x00\x01\x78\xd3\x0b\x08\x00\xc0\x4d\x9b\xc0\x30\x1c\x87\xff\ \x0f\xea\x42\x99\x5c\x01\x80\x84\x01\xc0\x74\x91\x38\x4b\x08\x80\ \x14\x00\x40\x7a\x8e\x42\xa6\x00\x40\x46\x01\x80\x9d\x98\x26\x53\ \x00\xa0\x04\x00\x60\xcb\x63\x62\xe3\x00\x50\x2d\x00\x60\x27\x7f\ \xe6\xd3\x00\x80\x9d\xf8\x99\x7b\x01\x00\x5b\x94\x21\x15\x01\xa0\ \x91\x00\x20\x13\x65\x88\x44\x00\x68\x3b\x00\xac\xcf\x56\x8a\x45\ \x00\x58\x30\x00\x14\x66\x4b\xc4\x39\x00\xd8\x2d\x00\x30\x49\x57\ \x66\x48\x00\xb0\xb7\x00\xc0\xce\x10\x0b\xb2\x00\x08\x0c\x00\x30\ \x51\x88\x85\x29\x00\x04\x7b\x00\x60\xc8\x23\x23\x78\x00\x84\x99\ \x00\x14\x46\xf2\x57\x3c\xf1\x2b\xae\x10\xe7\x2a\x00\x00\x78\x99\ \xb2\x3c\xb9\x24\x39\x45\x81\x5b\x08\x2d\x71\x07\x57\x57\x2e\x1e\ \x28\xce\x49\x17\x2b\x14\x36\x61\x02\x61\x9a\x40\x2e\xc2\x79\x99\ \x19\x32\x81\x34\x0f\xe0\xf3\xcc\x00\x00\xa0\x91\x15\x11\xe0\x83\ \xf3\xfd\x78\xce\x0e\xae\xce\xce\x36\x8e\xb6\x0e\x5f\x2d\xea\xbf\ \x06\xff\x22\x62\x62\xe3\xfe\xe5\xcf\xab\x70\x40\x00\x00\xe1\x74\ 
\x7e\xd1\xfe\x2c\x2f\xb3\x1a\x80\x3b\x06\x80\x6d\xfe\xa2\x25\xee\ \x04\x68\x5e\x0b\xa0\x75\xf7\x8b\x66\xb2\x0f\x40\xb5\x00\xa0\xe9\ \xda\x57\xf3\x70\xf8\x7e\x3c\x3c\x45\xa1\x90\xb9\xd9\xd9\xe5\xe4\ \xe4\xd8\x4a\xc4\x42\x5b\x61\xca\x57\x7d\xfe\x67\xc2\x5f\xc0\x57\ \xfd\x6c\xf9\x7e\x3c\xfc\xf7\xf5\xe0\xbe\xe2\x24\x81\x32\x5d\x81\ \x47\x04\xf8\xe0\xc2\xcc\xf4\x4c\xa5\x1c\xcf\x92\x09\x84\x62\xdc\ \xe6\x8f\x47\xfc\xb7\x0b\xff\xfc\x1d\xd3\x22\xc4\x49\x62\xb9\x58\ \x2a\x14\xe3\x51\x12\x71\x8e\x44\x9a\x8c\xf3\x32\xa5\x22\x89\x42\ \x92\x29\xc5\x25\xd2\xff\x64\xe2\xdf\x2c\xfb\x03\x3e\xdf\x35\x00\ \xb0\x6a\x3e\x01\x7b\x91\x2d\xa8\x5d\x63\x03\xf6\x4b\x27\x10\x58\ \x74\xc0\xe2\xf7\x00\x00\xf2\xbb\x6f\xc1\xd4\x28\x08\x03\x80\x68\ \x83\xe1\xcf\x77\xff\xef\x3f\xfd\x47\xa0\x25\x00\x80\x66\x49\x92\ \x71\x00\x00\x5e\x44\x24\x2e\x54\xca\xb3\x3f\xc7\x08\x00\x00\x44\ \xa0\x81\x2a\xb0\x41\x1b\xf4\xc1\x18\x2c\xc0\x06\x1c\xc1\x05\xdc\ \xc1\x0b\xfc\x60\x36\x84\x42\x24\xc4\xc2\x42\x10\x42\x0a\x64\x80\ \x1c\x72\x60\x29\xac\x82\x42\x28\x86\xcd\xb0\x1d\x2a\x60\x2f\xd4\ \x40\x1d\x34\xc0\x51\x68\x86\x93\x70\x0e\x2e\xc2\x55\xb8\x0e\x3d\ \x70\x0f\xfa\x61\x08\x9e\xc1\x28\xbc\x81\x09\x04\x41\xc8\x08\x13\ \x61\x21\xda\x88\x01\x62\x8a\x58\x23\x8e\x08\x17\x99\x85\xf8\x21\ \xc1\x48\x04\x12\x8b\x24\x20\xc9\x88\x14\x51\x22\x4b\x91\x35\x48\ \x31\x52\x8a\x54\x20\x55\x48\x1d\xf2\x3d\x72\x02\x39\x87\x5c\x46\ \xba\x91\x3b\xc8\x00\x32\x82\xfc\x86\xbc\x47\x31\x94\x81\xb2\x51\ \x3d\xd4\x0c\xb5\x43\xb9\xa8\x37\x1a\x84\x46\xa2\x0b\xd0\x64\x74\ \x31\x9a\x8f\x16\xa0\x9b\xd0\x72\xb4\x1a\x3d\x8c\x36\xa1\xe7\xd0\ \xab\x68\x0f\xda\x8f\x3e\x43\xc7\x30\xc0\xe8\x18\x07\x33\xc4\x6c\ \x30\x2e\xc6\xc3\x42\xb1\x38\x2c\x09\x93\x63\xcb\xb1\x22\xac\x0c\ \xab\xc6\x1a\xb0\x56\xac\x03\xbb\x89\xf5\x63\xcf\xb1\x77\x04\x12\ \x81\x45\xc0\x09\x36\x04\x77\x42\x20\x61\x1e\x41\x48\x58\x4c\x58\ \x4e\xd8\x48\xa8\x20\x1c\x24\x34\x11\xda\x09\x37\x09\x03\x84\x51\ \xc2\x27\x22\x93\xa8\x4b\xb4\x26\xba\x11\xf9\xc4\x18\x62\x32\x31\ 
\x87\x58\x48\x2c\x23\xd6\x12\x8f\x13\x2f\x10\x7b\x88\x43\xc4\x37\ \x24\x12\x89\x43\x32\x27\xb9\x90\x02\x49\xb1\xa4\x54\xd2\x12\xd2\ \x46\xd2\x6e\x52\x23\xe9\x2c\xa9\x9b\x34\x48\x1a\x23\x93\xc9\xda\ \x64\x6b\xb2\x07\x39\x94\x2c\x20\x2b\xc8\x85\xe4\x9d\xe4\xc3\xe4\ \x33\xe4\x1b\xe4\x21\xf2\x5b\x0a\x9d\x62\x40\x71\xa4\xf8\x53\xe2\ \x28\x52\xca\x6a\x4a\x19\xe5\x10\xe5\x34\xe5\x06\x65\x98\x32\x41\ \x55\xa3\x9a\x52\xdd\xa8\xa1\x54\x11\x35\x8f\x5a\x42\xad\xa1\xb6\ \x52\xaf\x51\x87\xa8\x13\x34\x75\x9a\x39\xcd\x83\x16\x49\x4b\xa5\ \xad\xa2\x95\xd3\x1a\x68\x17\x68\xf7\x69\xaf\xe8\x74\xba\x11\xdd\ \x95\x1e\x4e\x97\xd0\x57\xd2\xcb\xe9\x47\xe8\x97\xe8\x03\xf4\x77\ \x0c\x0d\x86\x15\x83\xc7\x88\x67\x28\x19\x9b\x18\x07\x18\x67\x19\ \x77\x18\xaf\x98\x4c\xa6\x19\xd3\x8b\x19\xc7\x54\x30\x37\x31\xeb\ \x98\xe7\x99\x0f\x99\x6f\x55\x58\x2a\xb6\x2a\x7c\x15\x91\xca\x0a\ \x95\x4a\x95\x26\x95\x1b\x2a\x2f\x54\xa9\xaa\xa6\xaa\xde\xaa\x0b\ \x55\xf3\x55\xcb\x54\x8f\xa9\x5e\x53\x7d\xae\x46\x55\x33\x53\xe3\ \xa9\x09\xd4\x96\xab\x55\xaa\x9d\x50\xeb\x53\x1b\x53\x67\xa9\x3b\ \xa8\x87\xaa\x67\xa8\x6f\x54\x3f\xa4\x7e\x59\xfd\x89\x06\x59\xc3\ \x4c\xc3\x4f\x43\xa4\x51\xa0\xb1\x5f\xe3\xbc\xc6\x20\x0b\x63\x19\ \xb3\x78\x2c\x21\x6b\x0d\xab\x86\x75\x81\x35\xc4\x26\xb1\xcd\xd9\ \x7c\x76\x2a\xbb\x98\xfd\x1d\xbb\x8b\x3d\xaa\xa9\xa1\x39\x43\x33\ \x4a\x33\x57\xb3\x52\xf3\x94\x66\x3f\x07\xe3\x98\x71\xf8\x9c\x74\ \x4e\x09\xe7\x28\xa7\x97\xf3\x7e\x8a\xde\x14\xef\x29\xe2\x29\x1b\ \xa6\x34\x4c\xb9\x31\x65\x5c\x6b\xaa\x96\x97\x96\x58\xab\x48\xab\ \x51\xab\x47\xeb\xbd\x36\xae\xed\xa7\x9d\xa6\xbd\x45\xbb\x59\xfb\ \x81\x0e\x41\xc7\x4a\x27\x5c\x27\x47\x67\x8f\xce\x05\x9d\xe7\x53\ \xd9\x53\xdd\xa7\x0a\xa7\x16\x4d\x3d\x3a\xf5\xae\x2e\xaa\x6b\xa5\ \x1b\xa1\xbb\x44\x77\xbf\x6e\xa7\xee\x98\x9e\xbe\x5e\x80\x9e\x4c\ \x6f\xa7\xde\x79\xbd\xe7\xfa\x1c\x7d\x2f\xfd\x54\xfd\x6d\xfa\xa7\ \xf5\x47\x0c\x58\x06\xb3\x0c\x24\x06\xdb\x0c\xce\x18\x3c\xc5\x35\ \x71\x6f\x3c\x1d\x2f\xc7\xdb\xf1\x51\x43\x5d\xc3\x40\x43\xa5\x61\ 
\x95\x61\x97\xe1\x84\x91\xb9\xd1\x3c\xa3\xd5\x46\x8d\x46\x0f\x8c\ \x69\xc6\x5c\xe3\x24\xe3\x6d\xc6\x6d\xc6\xa3\x26\x06\x26\x21\x26\ \x4b\x4d\xea\x4d\xee\x9a\x52\x4d\xb9\xa6\x29\xa6\x3b\x4c\x3b\x4c\ \xc7\xcd\xcc\xcd\xa2\xcd\xd6\x99\x35\x9b\x3d\x31\xd7\x32\xe7\x9b\ \xe7\x9b\xd7\x9b\xdf\xb7\x60\x5a\x78\x5a\x2c\xb6\xa8\xb6\xb8\x65\ \x49\xb2\xe4\x5a\xa6\x59\xee\xb6\xbc\x6e\x85\x5a\x39\x59\xa5\x58\ \x55\x5a\x5d\xb3\x46\xad\x9d\xad\x25\xd6\xbb\xad\xbb\xa7\x11\xa7\ \xb9\x4e\x93\x4e\xab\x9e\xd6\x67\xc3\xb0\xf1\xb6\xc9\xb6\xa9\xb7\ \x19\xb0\xe5\xd8\x06\xdb\xae\xb6\x6d\xb6\x7d\x61\x67\x62\x17\x67\ \xb7\xc5\xae\xc3\xee\x93\xbd\x93\x7d\xba\x7d\x8d\xfd\x3d\x07\x0d\ \x87\xd9\x0e\xab\x1d\x5a\x1d\x7e\x73\xb4\x72\x14\x3a\x56\x3a\xde\ \x9a\xce\x9c\xee\x3f\x7d\xc5\xf4\x96\xe9\x2f\x67\x58\xcf\x10\xcf\ \xd8\x33\xe3\xb6\x13\xcb\x29\xc4\x69\x9d\x53\x9b\xd3\x47\x67\x17\ \x67\xb9\x73\x83\xf3\x88\x8b\x89\x4b\x82\xcb\x2e\x97\x3e\x2e\x9b\ \x1b\xc6\xdd\xc8\xbd\xe4\x4a\x74\xf5\x71\x5d\xe1\x7a\xd2\xf5\x9d\ \x9b\xb3\x9b\xc2\xed\xa8\xdb\xaf\xee\x36\xee\x69\xee\x87\xdc\x9f\ \xcc\x34\x9f\x29\x9e\x59\x33\x73\xd0\xc3\xc8\x43\xe0\x51\xe5\xd1\ \x3f\x0b\x9f\x95\x30\x6b\xdf\xac\x7e\x4f\x43\x4f\x81\x67\xb5\xe7\ \x23\x2f\x63\x2f\x91\x57\xad\xd7\xb0\xb7\xa5\x77\xaa\xf7\x61\xef\ \x17\x3e\xf6\x3e\x72\x9f\xe3\x3e\xe3\x3c\x37\xde\x32\xde\x59\x5f\ \xcc\x37\xc0\xb7\xc8\xb7\xcb\x4f\xc3\x6f\x9e\x5f\x85\xdf\x43\x7f\ \x23\xff\x64\xff\x7a\xff\xd1\x00\xa7\x80\x25\x01\x67\x03\x89\x81\ \x41\x81\x5b\x02\xfb\xf8\x7a\x7c\x21\xbf\x8e\x3f\x3a\xdb\x65\xf6\ \xb2\xd9\xed\x41\x8c\xa0\xb9\x41\x15\x41\x8f\x82\xad\x82\xe5\xc1\ \xad\x21\x68\xc8\xec\x90\xad\x21\xf7\xe7\x98\xce\x91\xce\x69\x0e\ \x85\x50\x7e\xe8\xd6\xd0\x07\x61\xe6\x61\x8b\xc3\x7e\x0c\x27\x85\ \x87\x85\x57\x86\x3f\x8e\x70\x88\x58\x1a\xd1\x31\x97\x35\x77\xd1\ \xdc\x43\x73\xdf\x44\xfa\x44\x96\x44\xde\x9b\x67\x31\x4f\x39\xaf\ \x2d\x4a\x35\x2a\x3e\xaa\x2e\x6a\x3c\xda\x37\xba\x34\xba\x3f\xc6\ \x2e\x66\x59\xcc\xd5\x58\x9d\x58\x49\x6c\x4b\x1c\x39\x2e\x2a\xae\ 
\x36\x6e\x6c\xbe\xdf\xfc\xed\xf3\x87\xe2\x9d\xe2\x0b\xe3\x7b\x17\ \x98\x2f\xc8\x5d\x70\x79\xa1\xce\xc2\xf4\x85\xa7\x16\xa9\x2e\x12\ \x2c\x3a\x96\x40\x4c\x88\x4e\x38\x94\xf0\x41\x10\x2a\xa8\x16\x8c\ \x25\xf2\x13\x77\x25\x8e\x0a\x79\xc2\x1d\xc2\x67\x22\x2f\xd1\x36\ \xd1\x88\xd8\x43\x5c\x2a\x1e\x4e\xf2\x48\x2a\x4d\x7a\x92\xec\x91\ \xbc\x35\x79\x24\xc5\x33\xa5\x2c\xe5\xb9\x84\x27\xa9\x90\xbc\x4c\ \x0d\x4c\xdd\x9b\x3a\x9e\x16\x9a\x76\x20\x6d\x32\x3d\x3a\xbd\x31\ \x83\x92\x91\x90\x71\x42\xaa\x21\x4d\x93\xb6\x67\xea\x67\xe6\x66\ \x76\xcb\xac\x65\x85\xb2\xfe\xc5\x6e\x8b\xb7\x2f\x1e\x95\x07\xc9\ \x6b\xb3\x90\xac\x05\x59\x2d\x0a\xb6\x42\xa6\xe8\x54\x5a\x28\xd7\ \x2a\x07\xb2\x67\x65\x57\x66\xbf\xcd\x89\xca\x39\x96\xab\x9e\x2b\ \xcd\xed\xcc\xb3\xca\xdb\x90\x37\x9c\xef\x9f\xff\xed\x12\xc2\x12\ \xe1\x92\xb6\xa5\x86\x4b\x57\x2d\x1d\x58\xe6\xbd\xac\x6a\x39\xb2\ \x3c\x71\x79\xdb\x0a\xe3\x15\x05\x2b\x86\x56\x06\xac\x3c\xb8\x8a\ \xb6\x2a\x6d\xd5\x4f\xab\xed\x57\x97\xae\x7e\xbd\x26\x7a\x4d\x6b\ \x81\x5e\xc1\xca\x82\xc1\xb5\x01\x6b\xeb\x0b\x55\x0a\xe5\x85\x7d\ \xeb\xdc\xd7\xed\x5d\x4f\x58\x2f\x59\xdf\xb5\x61\xfa\x86\x9d\x1b\ \x3e\x15\x89\x8a\xae\x14\xdb\x17\x97\x15\x7f\xd8\x28\xdc\x78\xe5\ \x1b\x87\x6f\xca\xbf\x99\xdc\x94\xb4\xa9\xab\xc4\xb9\x64\xcf\x66\ \xd2\x66\xe9\xe6\xde\x2d\x9e\x5b\x0e\x96\xaa\x97\xe6\x97\x0e\x6e\ \x0d\xd9\xda\xb4\x0d\xdf\x56\xb4\xed\xf5\xf6\x45\xdb\x2f\x97\xcd\ \x28\xdb\xbb\x83\xb6\x43\xb9\xa3\xbf\x3c\xb8\xbc\x65\xa7\xc9\xce\ \xcd\x3b\x3f\x54\xa4\x54\xf4\x54\xfa\x54\x36\xee\xd2\xdd\xb5\x61\ \xd7\xf8\x6e\xd1\xee\x1b\x7b\xbc\xf6\x34\xec\xd5\xdb\x5b\xbc\xf7\ \xfd\x3e\xc9\xbe\xdb\x55\x01\x55\x4d\xd5\x66\xd5\x65\xfb\x49\xfb\ \xb3\xf7\x3f\xae\x89\xaa\xe9\xf8\x96\xfb\x6d\x5d\xad\x4e\x6d\x71\ \xed\xc7\x03\xd2\x03\xfd\x07\x23\x0e\xb6\xd7\xb9\xd4\xd5\x1d\xd2\ \x3d\x54\x52\x8f\xd6\x2b\xeb\x47\x0e\xc7\x1f\xbe\xfe\x9d\xef\x77\ \x2d\x0d\x36\x0d\x55\x8d\x9c\xc6\xe2\x23\x70\x44\x79\xe4\xe9\xf7\ \x09\xdf\xf7\x1e\x0d\x3a\xda\x76\x8c\x7b\xac\xe1\x07\xd3\x1f\x76\ 
\x1d\x67\x1d\x2f\x6a\x42\x9a\xf2\x9a\x46\x9b\x53\x9a\xfb\x5b\x62\ \x5b\xba\x4f\xcc\x3e\xd1\xd6\xea\xde\x7a\xfc\x47\xdb\x1f\x0f\x9c\ \x34\x3c\x59\x79\x4a\xf3\x54\xc9\x69\xda\xe9\x82\xd3\x93\x67\xf2\ \xcf\x8c\x9d\x95\x9d\x7d\x7e\x2e\xf9\xdc\x60\xdb\xa2\xb6\x7b\xe7\ \x63\xce\xdf\x6a\x0f\x6f\xef\xba\x10\x74\xe1\xd2\x45\xff\x8b\xe7\ \x3b\xbc\x3b\xce\x5c\xf2\xb8\x74\xf2\xb2\xdb\xe5\x13\x57\xb8\x57\ \x9a\xaf\x3a\x5f\x6d\xea\x74\xea\x3c\xfe\x93\xd3\x4f\xc7\xbb\x9c\ \xbb\x9a\xae\xb9\x5c\x6b\xb9\xee\x7a\xbd\xb5\x7b\x66\xf7\xe9\x1b\ \x9e\x37\xce\xdd\xf4\xbd\x79\xf1\x16\xff\xd6\xd5\x9e\x39\x3d\xdd\ \xbd\xf3\x7a\x6f\xf7\xc5\xf7\xf5\xdf\x16\xdd\x7e\x72\x27\xfd\xce\ \xcb\xbb\xd9\x77\x27\xee\xad\xbc\x4f\xbc\x5f\xf4\x40\xed\x41\xd9\ \x43\xdd\x87\xd5\x3f\x5b\xfe\xdc\xd8\xef\xdc\x7f\x6a\xc0\x77\xa0\ \xf3\xd1\xdc\x47\xf7\x06\x85\x83\xcf\xfe\x91\xf5\x8f\x0f\x43\x05\ \x8f\x99\x8f\xcb\x86\x0d\x86\xeb\x9e\x38\x3e\x39\x39\xe2\x3f\x72\ \xfd\xe9\xfc\xa7\x43\xcf\x64\xcf\x26\x9e\x17\xfe\xa2\xfe\xcb\xae\ \x17\x16\x2f\x7e\xf8\xd5\xeb\xd7\xce\xd1\x98\xd1\xa1\x97\xf2\x97\ \x93\xbf\x6d\x7c\xa5\xfd\xea\xc0\xeb\x19\xaf\xdb\xc6\xc2\xc6\x1e\ \xbe\xc9\x78\x33\x31\x5e\xf4\x56\xfb\xed\xc1\x77\xdc\x77\x1d\xef\ \xa3\xdf\x0f\x4f\xe4\x7c\x20\x7f\x28\xff\x68\xf9\xb1\xf5\x53\xd0\ \xa7\xfb\x93\x19\x93\x93\xff\x04\x03\x98\xf3\xfc\x63\x33\x2d\xdb\ \x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xb1\x8e\x7c\xfb\x51\x93\ \x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x25\x00\x00\x80\x83\ \x00\x00\xf9\xff\x00\x00\x80\xe9\x00\x00\x75\x30\x00\x00\xea\x60\ \x00\x00\x3a\x98\x00\x00\x17\x6f\x92\x5f\xc5\x46\x00\x00\x03\x37\ \x49\x44\x41\x54\x78\xda\x74\x92\x5b\x68\xdb\x65\x18\x87\x9f\xef\ \x7f\xca\xc9\x2e\x3d\xe5\xdc\x63\x4c\x4b\x18\x13\x75\x9b\x6e\x15\ \xba\xce\x39\x6d\xa7\x78\xa1\x9b\x68\x5b\x69\x65\xad\x32\xa6\x43\ \x77\x31\x41\x6d\xc1\xf3\x9c\x78\x40\x65\xe8\x60\xc2\x14\xf4\x52\ \xbd\xf0\x42\x57\x90\x4c\x50\x76\xe3\x18\x82\xb3\x36\x5d\x5d\x97\ \xa6\x6b\xb5\x6d\x9a\xa6\x4b\xd2\x24\xff\xef\xf3\x62\x32\x98\xd8\ 
\xf7\xf2\xc7\xf3\x7b\x5e\x5e\x78\x85\x94\x92\xf5\xa6\x21\x12\x3a\ \x0c\x62\x7a\x26\x3d\xfb\xd5\x7a\x8c\xd1\x1e\x6b\x45\x4a\x49\xa5\ \x5c\x41\x29\x85\x54\x12\x29\x15\xc0\x73\x83\x83\xfd\xef\xcd\xa6\ \x67\x65\x30\xe0\xef\x11\xa8\x31\x84\x40\x08\x10\x08\x74\x5d\xc3\ \x30\x0c\xb4\x75\xc4\x2f\xef\x7f\x62\xe0\xfd\x37\x8e\xbe\xcb\xa7\ \xa7\xbe\xd0\xf6\x74\xdf\xf3\x1d\xf0\xc8\xff\x81\x7a\x6d\x4d\xf5\ \xb5\xcd\xff\x9e\xa2\x50\x6f\x0f\x0f\x0f\xbd\x38\xf2\xea\x5b\x5c\ \x9a\x98\x26\x95\x4e\x33\x30\x34\x2c\xfe\x9c\x18\xdf\x37\x91\x9c\ \xbc\x2c\x04\xe7\x05\x02\x4d\x13\x68\x9a\x06\xb1\x68\x33\xd1\x96\ \x46\x9a\x22\x21\x1a\xc3\xc1\x4f\x5e\x7b\x65\x54\x49\x29\xd5\x1f\ \xbf\x5f\x54\x3b\x76\x6f\x53\xb7\xdf\xb1\x51\xcd\xa4\xe6\x95\x6d\ \xdb\xea\xe0\x53\xfb\x65\x30\x50\x7f\x38\x14\xf0\xd1\x10\x0e\xd0\ \xd2\x14\xb9\x41\xf0\xf9\xb1\xa3\xaf\x2b\x29\xa5\x92\x52\xaa\xe9\ \xe9\x94\xda\xb4\x3d\xa0\x76\xdc\x1f\x53\xa5\xb2\xad\xa4\x94\xca\ \xb6\x6d\xf5\xec\xd3\x07\x54\x28\xe0\x1b\xfd\xaf\xe0\xeb\x8f\x8f\ \x7f\x78\xbd\x7c\x5d\x32\x35\xaf\xa6\xa7\xae\xdc\x90\xd9\xb6\xad\ \x46\x5f\x3a\xa2\x1a\xc2\x81\x77\x5a\x9a\x22\xe8\xb5\x35\xd5\x3f\ \xf6\xf5\xf7\x75\xef\xeb\x1d\xc0\xe5\x72\x62\x57\x14\xa9\xd9\x19\ \xb2\xd9\x2c\x4e\x8f\x85\x6e\x6a\x2c\x2c\x2e\x90\xc9\x64\x70\x3a\ \x5c\x28\x4d\xa7\x3d\x7e\x0b\x8b\x7f\xcf\xdd\x95\x4c\x26\xdb\x8c\ \xfe\xc7\x07\xcf\xb6\xb4\x36\x74\x7c\x76\xea\x84\x31\x73\xf9\x2f\ \x72\x2b\xcb\xf4\xec\xe9\x61\x6d\xad\xcc\x5a\xe1\x2a\x15\xa5\x70\ \x58\x16\x1e\x8f\x87\x44\x22\x01\x40\x73\x73\x84\x9d\xbb\xee\x2e\ \x2d\x2e\x66\x2e\x69\xc0\xf3\xdd\xdd\xbb\x73\x01\x9f\x8f\x73\xbf\ \x9c\xe3\x4c\xe2\x0c\xbd\xbd\x7d\x04\x83\x7e\x3a\xbb\xba\xd8\xd9\ \xd5\x45\x38\x1c\x66\xef\x43\x7b\x19\x1f\x4f\x72\xfa\xf4\x0f\x04\ \xfd\x3e\xb6\x77\x6c\x99\x05\x46\x8c\x43\xcf\x0c\x01\x50\x5f\x1f\ \xe4\xa6\x3a\xc9\x46\x5f\x0c\x87\xc3\xa2\x50\x28\x10\x8b\xc5\x28\ \x96\xcb\xcc\xcf\xcd\x53\xe5\xad\xa2\xba\xce\x40\x77\xd5\x13\x6b\ \x8b\x63\x99\x06\x1f\x7c\x74\x0c\x63\x35\xb7\x06\xa0\x4c\xdd\x24\ 
\x93\x4f\x13\x6b\xbb\x99\xfc\x6a\x09\xbb\x62\xb3\x92\xcb\x91\xcd\ \x5d\x65\x35\x97\x23\xbb\x94\xa7\x68\x2f\x53\xe3\xb7\xb0\x4c\x8b\ \xa5\xa5\xcc\xb5\x57\x7e\xf8\xc0\x9d\x1c\x39\xf4\x82\xa8\x0d\xf8\ \xf9\xf5\xa7\x2b\x94\xb2\xa0\xb4\x12\x0a\x85\x61\x9a\x54\x7b\x37\ \x60\x39\x5d\x78\x6b\xdd\x9c\x38\xfe\x25\x63\x67\x4f\xb2\xa1\xc6\ \xa4\x54\xb0\xe5\x85\x99\xef\x11\x9b\xef\xad\x7e\xa0\x54\x28\x7f\ \x5b\x28\x17\xe9\xdc\x75\x2b\x9b\xa2\xdb\xb8\xad\xe9\x51\x32\xb9\ \x05\xa2\xd1\x28\xb6\xad\x98\x9a\xbc\x88\xc7\x9f\x67\xe4\xcd\x83\ \x4c\xfe\x96\xa7\xb3\xa7\x81\xc7\x1e\x7c\x72\xce\x61\x6a\x8d\x22\ \x1e\x0f\x6d\x71\x7b\xf5\x84\xae\x1b\xa6\x2d\xca\x9a\xa5\x5b\xca\ \xeb\x6e\x5f\xde\xba\x79\x6b\x55\xa1\x58\x28\x01\xb8\x9c\x6e\xf3\ \xe7\xf3\xdf\xe8\x4a\x95\x45\x39\xa7\xec\x82\x5c\x35\x57\x8b\x2b\ \xaa\x26\xe0\x6c\x13\xf1\x78\x08\xb7\x57\x27\x31\x76\xc1\xad\x69\ \x9a\x5b\x17\xba\xc7\xe1\x72\x14\x2b\x95\x8a\x53\x4a\x69\x00\x68\ \x9a\x56\x11\x42\x37\x50\xe4\x11\x64\x52\xe9\xa9\xba\xfb\x7a\x3a\ \xaa\xea\x5a\x19\xff\x67\x00\x67\x7b\x7d\x9f\x94\x3d\xd8\xe8\x00\ \x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\ \x00\x00\x1f\xdd\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x40\x00\x00\x00\x40\x08\x06\x00\x00\x00\xaa\x69\x71\xde\ \x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\ \x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\ \x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\ \x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x09\x19\ \x0f\x31\x0d\x3a\x99\x30\xa2\x00\x00\x00\x08\x74\x45\x58\x74\x43\ \x6f\x6d\x6d\x65\x6e\x74\x00\xf6\xcc\x96\xbf\x00\x00\x1f\x49\x49\ \x44\x41\x54\x78\xda\xdd\x7b\x69\x78\x5c\xc5\x95\xf6\x7b\xaa\xea\ \xf6\xaa\x6e\xed\x6a\xad\xb6\x6c\x79\x95\x2d\x4b\xb6\xf1\x06\x36\ \xd8\x06\x6f\x30\x01\x0c\x84\x78\x60\x08\x24\x2c\x61\xc9\x24\x84\ \x84\x90\x99\x04\x42\x12\x32\x99\x21\x10\x18\xb6\x61\x4b\x02\x04\ \x08\x5b\xb0\xc1\x36\x78\xc3\xfb\x22\xbc\x80\x77\x63\x2d\xd6\x62\ 
\xed\xbb\xba\xa5\xee\xbe\x7d\xef\xad\xf3\xfd\x68\x89\xe1\xcb\x97\ \xcc\x17\x0c\x49\x26\x39\xcf\xf3\x3e\x55\xb7\x54\xba\x7d\xcf\x5b\ \xa7\x4e\x9d\x3a\x75\x2f\xe1\x33\xc8\xf7\xfe\xe5\x1e\xfa\xf7\x9f\ \xfd\x98\x87\xaf\x8b\x47\x8f\xcf\xbf\xf2\xca\x2b\xa7\x8e\x1e\x55\ \x3c\x25\x33\x33\x63\x52\x5a\x5a\x5a\x9e\xdb\xe5\xca\x96\x4a\x78\ \x08\xc4\x09\x2b\x41\xa6\x99\xe8\xef\xed\xed\xed\x6e\x6f\xef\xa8\ \x6f\x6a\x6a\x39\xb6\x6f\xff\xfe\xbd\x5b\x37\xaf\xaf\x02\xd0\x8b\ \xbf\x82\xd0\xe7\x70\x8f\xd4\xbb\xef\xbd\xef\x92\x59\x33\xa6\xff\ \x63\x71\xf1\xc8\x8a\xd4\x60\x30\x57\x08\x42\x3c\x16\x47\x2c\x16\ \x43\x3c\x16\x83\x65\x59\x60\xd6\x30\x5c\x2e\x78\x3d\x5e\x78\x3c\ \x1e\x78\x7d\x5e\x38\x9a\xd1\xdb\xdb\x87\xfa\xfa\x86\xba\xbd\xfb\ \xf7\xbf\xb7\x75\xeb\xf6\xd7\x76\xed\xd8\xbc\xf1\x6f\x82\x80\xb3\ \x66\xce\x2d\xbe\xfe\xfa\xeb\xbe\x36\x7d\xda\xd4\x9b\x46\x14\x15\ \x66\x58\x09\x13\x55\x27\x4f\xe2\xc8\xe1\xc3\xa8\xab\x6f\x68\x6d\ \x69\x6e\x6e\x3a\xdd\x74\xba\x3b\x1a\x8d\xf5\x31\xeb\x38\x18\x10\ \x52\x4a\xc3\x50\x29\x99\x99\x99\xe9\xa3\x46\x8d\x2a\x28\x2c\x2c\ \x2c\x2c\x2f\x2f\xf7\x4e\x2c\x2d\x45\x30\x35\x0d\xf5\x0d\x8d\xd8\ \xbd\x7b\xcf\xa1\x6d\xdb\xb6\x3f\xbd\xf2\xcd\xd7\x7e\x09\xc0\xfc\ \xdf\x48\x80\xeb\xfe\x07\x1e\xba\x73\xc9\xe2\x0b\xbe\x5d\x90\x9f\ \x97\xde\x58\x57\x8f\xad\xdb\xb6\x62\xcb\xd6\xad\xb5\x55\x27\xab\ \x36\x9e\x3a\x75\xea\x80\xe5\xd8\x0d\x00\x3a\x00\x44\x01\x24\x00\ \x30\x00\x0d\x40\x02\x70\x01\xf0\x00\x48\x03\x50\x90\x93\x9d\x3d\ \xa9\x64\x74\xc9\xbc\x85\x0b\x16\x94\x2f\x5a\xbc\x28\xad\xbc\xa2\ \x02\xcd\x2d\xad\x58\xb3\x76\xdd\xa1\x37\x57\xae\xfa\xc9\xde\xca\ \x1d\xbf\xc3\xff\x16\xb9\xe2\x8b\xff\x78\xce\xba\x75\x1b\x4f\x76\ \x77\x77\xf3\x87\x07\x3e\xe0\x7b\xbe\xff\x7d\x7b\x5a\x79\xc5\x3b\ \x00\xbe\x62\x00\x93\x00\x14\x01\xc8\x01\x90\x45\x44\x19\x52\xca\ \x54\x00\x29\xbf\x87\x20\x11\xa5\x03\xc8\x1c\xea\x5b\x08\x60\x2c\ \x80\x8b\xd3\xd3\xd2\x1e\xbe\xe1\xfa\xeb\x6b\x37\x6f\xda\xc4\xfd\ \xfd\xfd\xbc\x71\xe3\x7b\x7c\xdd\x57\x6f\x7a\x4d\x18\xfe\x3c\x00\ 
\xb8\x60\xd1\x85\xf4\x57\x53\xfe\xde\x1f\xfd\xf4\x3b\x1f\x7d\x54\ \x95\xe8\x68\x6b\xe7\x27\x1e\x7d\x8c\xcf\x9e\x3d\xa7\x72\x24\x70\ \xcb\x5e\xe0\xb9\xd5\xc0\x15\x86\x61\x64\x67\x00\x41\x00\xea\x93\ \x96\x75\xcf\xbd\x3f\xa5\x83\x47\xab\x45\xe5\x07\x27\xa8\xa9\xd3\ \xa4\xdf\xb3\x3e\x09\xc0\x4f\x44\x59\x6e\x97\x2b\x1f\x40\x31\x80\ \xc5\x59\x99\x99\x4f\x7e\xef\xae\xbb\x3a\xea\x4f\x9d\xe2\xd3\xa7\ \x9b\xf8\x87\x3f\xba\xef\x74\x6a\x5a\x68\xf6\x5f\x4b\x77\xf1\x9f\ \x8f\x3c\xfe\xd4\xe9\xd3\x4d\x7c\xe4\xf0\x21\xbe\xfe\xda\xeb\xc2\ \x5e\xb7\xe7\xee\xe5\x40\x79\x2b\x70\x3c\x0c\xf0\x20\x10\x63\x20\ \x1f\x00\x59\x96\x9d\xc2\xcc\x79\xcc\x3c\x96\x99\x27\x31\x73\xd9\ \x50\x39\x9e\x99\x4b\x98\xb9\x90\x99\xb3\x99\x39\x85\x99\x5d\xff\ \x4d\x87\x50\x44\xc2\x0f\x20\x6b\xc8\x92\x56\xcc\x9e\x39\x6b\xc3\ \xc6\xf5\xeb\x9d\xfe\xfe\x30\x3f\xf2\xe8\x13\xce\x9c\x73\xe6\x7f\ \xe5\x2f\xae\xfd\xe3\x4f\x3c\xfd\x6a\x6f\x6f\x1f\x6f\xde\xb4\x89\ \x2f\x5a\xba\xac\x56\x02\xcb\x01\xe4\x9c\x06\x3d\x1b\x03\x98\x01\ \x76\x88\x58\x4f\x9b\xf6\x1c\x33\xcf\x61\xe6\x45\xcc\xbc\x48\x6b\ \x3d\x5f\x6b\x7d\xb6\xd6\xfa\x2c\xad\x75\xb9\xd6\xba\x54\x6b\x3d\ \x4e\x6b\x3d\x5a\x6b\x3d\x42\x6b\x9d\xaf\xb5\xce\xd1\x5a\xa7\x31\ \xb3\xf7\x63\x07\xe3\xf2\x49\x22\x0a\xba\x92\x16\x71\x4e\x6e\x28\ \xf4\xcc\x2f\x9f\x79\xa6\x2f\x16\x8b\xf1\xd3\xcf\xfc\x8a\x4b\x27\ \x4d\xbd\xee\x2f\xa6\xfc\x2f\x1e\x7e\xe4\xf9\x8e\xce\x2e\x7e\x67\ \xf5\x1a\x9e\x3e\x75\xea\x16\x00\xd3\x42\x80\x7f\x84\xdf\x1f\x70\ \x00\x76\x00\xd6\x63\xc7\x5a\xbc\x6c\x19\xeb\x6f\x7c\x63\x0b\x33\ \x7f\x41\x6b\xbd\x88\x99\xcf\x63\xe6\x39\x5a\xeb\x19\x5a\xeb\x72\ \x66\x9e\xa4\xb5\x1e\xaf\xb5\x2e\xd1\x5a\x8f\x60\xe6\x7c\xad\x39\ \xc4\xac\xb3\x98\x39\x60\x3b\xda\xf5\xfb\xbf\x2d\xa5\x72\x11\x51\ \x10\xc0\x04\x00\x3f\xf8\xcf\x87\x1e\x6a\x1f\x1c\x1c\xe4\xc7\x1e\ \x7f\x92\x67\xce\x9e\xf7\x15\x00\x38\x6f\xfe\xe2\x3f\x9f\x4f\xf8\ \x97\x7f\xbd\xfb\xc7\xad\xad\x6d\xbc\x79\xd3\x26\x3e\x67\xf6\x9c\ \x5d\x00\xc6\xfb\xfd\xfe\x34\x00\x3e\x06\xae\xd2\xa9\xa9\xcc\x17\ 
\x5e\xd8\xad\x6f\xbf\xfd\x43\xbd\x7c\x39\xeb\x9b\x6e\x5a\xc7\xcc\ \x8b\xb5\xd6\xf3\x99\xf9\x1c\x66\x9e\xc9\xcc\xd3\xb4\xd6\x93\x99\ \x79\x82\xd6\x7a\x0c\x33\x17\x6b\xd6\x85\xac\x39\x8f\x59\x67\x6a\ \xad\x3d\xc3\xbf\xb7\xb1\x4e\x13\x00\xbc\xf9\x81\x45\x6b\x0e\xc7\ \x09\x00\x0a\x0a\x46\x08\xa5\x54\x2a\x80\x31\x86\x52\xf7\x3e\xf9\ \xf8\xe3\x6d\xb1\x68\x8c\xef\xfd\xd1\x4f\x9d\x60\x5a\x68\xd6\xe7\ \xa1\xa7\xfa\x43\x8d\x97\x7d\xf1\xaa\x8b\x57\xac\xf8\xd2\xdd\x9d\ \xed\xed\xf8\xe9\x7d\x3f\x3d\xb8\xab\x72\xcf\x8d\x00\xea\x07\x07\ \x07\x09\x40\x1c\x33\x67\x2e\xa3\x71\xe3\x80\x94\x94\x18\xfc\xfe\ \x93\x64\xdb\x15\x1c\x8f\xbb\x01\xd8\x44\xe4\x30\xb3\x03\xc0\x21\ \x22\x87\x88\x34\x33\x6b\x02\x1c\x06\x1c\x62\x68\x26\xc4\x4d\xa6\ \x7e\xaf\x20\x0d\x00\x07\xc3\x9a\x1c\x9b\xb0\xa7\x4d\x93\x69\x12\ \x22\xe1\xe4\x73\x34\x37\x37\x6a\x00\xfd\x81\x40\xd0\x8c\x44\xc2\ \xbf\xbd\xf9\xb6\xdb\x10\xca\xcb\xfb\xe6\x2d\x37\xdf\x90\xd6\xd4\ \xdc\xfc\xfa\x3b\xef\xac\x9b\xd5\xd2\x74\xaa\xf5\x33\x39\xb8\x3f\ \xd0\x96\x75\xdd\x97\xff\xe9\x91\xbc\x50\x0e\x1e\x7b\xec\xb1\xbe\ \xf7\xb6\x6e\xb9\x5d\x4a\xd9\x38\xe4\xb5\xe3\xcc\x5c\x86\x85\x0b\ \xcb\x98\x19\x50\xaa\x01\x89\x04\xb1\x6d\x03\x8e\xe3\x00\xb0\x87\ \x95\x1f\x06\x33\x3b\x20\x72\x00\x68\x02\x18\xa0\x01\x41\xd4\x3b\ \xac\x3c\x00\x54\x04\x05\x4f\xcf\x20\x9e\x93\x2b\x78\xfe\x48\xe2\ \x2f\x94\x19\xfc\xc9\x07\x8a\x44\xc2\x71\x97\xa1\x4e\x03\x78\xfb\ \x8e\x3b\xee\x78\xa3\xbb\xb3\xd3\xbe\xe1\xab\xd7\x15\x95\x96\x4e\ \xfc\xc5\x67\xf6\xf0\x1f\x2f\x73\xf7\xfd\x3b\x01\xc0\x0f\x7f\x74\ \xdf\x03\x67\xcf\x99\x35\xf2\xd5\x57\x7e\x8b\x17\x7e\xf3\xc2\x5d\ \x00\x76\x3a\x8e\xc3\x43\xca\x4f\x42\x6b\xeb\x62\xee\xec\x2c\x86\ \x69\x02\xff\xf0\x0f\xaf\x53\x4a\x4a\x27\x6c\x1b\x64\x59\x72\x58\ \x69\x4a\x2a\x9c\xac\x27\x03\x20\x0d\x40\x6b\xa0\x9f\x04\x85\x6b\ \xb4\xa6\x56\xd6\xd4\xc4\x9a\x1a\x58\x53\x1d\x6b\xaa\x76\x34\x1d\ \x37\x35\x1d\x8a\x68\xda\xd7\xad\x69\x47\xb3\xa6\x4d\x35\x9a\x5e\ \xdf\x9b\x5c\x3a\x6f\xf8\xd7\x17\x63\x86\xcb\x73\xbc\xae\xbe\xfe\ 
\xf5\x1f\xdc\x7d\xf7\x86\x49\x93\x26\x60\xd9\xd2\xc5\x2b\xc6\x4d\ \x2c\x5f\xfe\xf9\x10\xf0\x83\xef\x71\x6e\x41\xf1\x8c\x45\x17\x9c\ \x7f\x6d\x53\x63\x23\x5e\x79\xe5\x95\xdf\xc5\x4d\x73\x95\x94\x2e\ \x2f\x80\xa8\xed\xf0\x28\x00\x73\x78\xed\xda\x52\x1a\x1c\x48\x85\ \x52\x7d\x3c\x73\xe6\x69\xce\xca\x6a\x83\xd6\x40\x3c\xee\x03\xc0\ \x44\xa4\x87\x22\x3f\x66\x66\x66\x22\x26\x80\x34\x51\xb8\x85\x79\ \xb0\x43\x6b\x19\x04\x84\x60\x08\x05\x08\xc5\x10\x12\x10\x8a\x20\ \x0c\x05\xa1\x0c\x08\xc3\x80\x70\x19\x10\x2e\x17\x44\x20\xd5\x25\ \x7e\xb7\xcf\x91\x57\xdd\xf2\x25\x71\xe7\x43\x87\x2c\xc3\x30\x8e\ \xbf\xb9\x72\xe5\xda\xb7\x57\xbd\xd5\x74\xc5\xe5\xcb\x31\xb9\x74\ \xe2\x3d\x00\x8c\xcf\x65\x0a\xdc\x7a\xf3\xd7\xee\x9d\x30\x7e\x1c\ \x7e\xf7\xc6\x1b\x3d\x07\xf6\xec\x79\x50\x19\x29\x31\xc7\x77\x96\ \xd3\xd4\xc3\x01\xc9\xce\x7c\x66\x66\x1c\x38\x30\x9f\x1d\x0d\xe4\ \xe6\xee\xa2\xf4\xf4\x38\x88\x2c\x38\x0e\xb8\xb7\x77\x0a\xbf\xf8\ \x62\xd1\xf0\x68\x83\x48\x13\xc0\x94\x6c\x08\x77\x00\x03\x7e\x22\ \xb9\xb5\x6d\x93\x52\x48\x2a\x2f\x19\xc2\x00\x84\xc1\x10\x06\x41\ \x18\x22\xa9\xb4\xcb\x0d\xe1\xf6\x40\xb8\xbd\x10\x1e\x0f\x0b\x5f\ \x0a\x89\xe6\x7a\x5b\x5c\x7b\xeb\x38\x5a\xf1\xf5\x17\xda\x01\x54\ \xfe\xdb\xbf\xfd\x6c\x9d\xdb\x65\xf0\xd2\xa5\x8b\x2b\xa6\xcd\x38\ \xfb\xda\xcf\x4c\x40\x5e\xfe\xc8\x59\xe7\x9e\x3b\x77\x71\x63\x7d\ \x1d\x36\x6f\xd9\xf2\x7a\x1c\x38\x16\x0c\x4d\x93\xdf\xbd\xff\x37\ \xba\x33\x06\x9b\xb5\xee\x26\x22\x86\x69\x7a\x60\xdb\x80\xdf\xdf\ \xcd\xcc\x1a\x3e\xdf\x20\x31\x03\xb6\xed\xa1\x86\x86\x74\x1e\x12\ \x70\x72\x1a\x33\x73\x6c\x90\x39\xd2\x39\xd0\x60\x28\x66\xb9\x24\ \xf7\x02\x92\x44\x52\x02\x52\x12\x49\x31\x54\x4a\x40\x2a\x90\x34\ \x08\xd2\x30\x48\x1a\x06\xa4\xcb\x20\xe9\x72\x41\xba\xdd\x24\xbd\ \x3e\x25\x8f\x7d\x08\x35\x6f\xc9\x0a\x09\x4f\xc9\xa9\x63\x27\x8e\ \xef\xdf\xb0\x61\xfd\x89\xf3\x17\xce\x47\x51\x61\xe1\xad\x9f\x99\ \x80\x65\x17\x2e\xbd\x75\xec\xd8\x12\xb5\x63\xf3\x96\xde\x3b\xf7\ \xec\x9e\x13\x01\xf5\xd7\xf4\x1c\xba\x7b\x74\xc5\x68\x54\x6e\xad\ 
\xd3\xcd\x35\xed\xbb\x18\xdc\x45\xf1\x78\x08\xcc\xa0\x69\xd3\x0e\ \x10\x91\xc6\xd5\x57\x1f\x67\x8f\xa7\x1a\xcc\xe0\x81\x01\x0f\x11\ \x69\x22\x62\x10\x31\x13\xd9\x26\x51\x3f\x01\xa2\x24\xe2\x92\x63\ \xd7\x4c\xfc\x72\xee\xaa\x82\xc8\x84\x35\xa5\x5f\x12\x80\xe8\x8e\ \xb5\x4b\x09\x08\xc5\x2c\x14\xff\x77\x69\x10\x0b\xc3\x05\x61\xb8\ \x58\xb8\x5c\x10\xfe\x70\xab\x0c\x84\xdb\x24\x6b\x16\xc1\x20\x44\ \x63\xac\x26\x0a\xe0\xf0\xa3\x8f\x3e\xb6\x37\x23\x3d\x1d\x53\xa7\ \x96\x4f\x1d\x3d\xa6\xf4\xfc\xcf\x42\x40\xc6\xd4\x8a\xf2\xa5\x70\ \x1c\x98\x6f\xbf\xed\xaf\x00\xa6\xb0\xe1\x81\x9f\xf8\x8e\xeb\x9f\ \xbe\xe1\xd6\xfc\xa9\xc5\x72\xb7\x2a\x8c\xc6\xef\xb8\xb3\x1d\xd1\ \x41\x49\x00\xf3\xf2\xe5\xd5\x00\x34\xa4\xd4\x10\x62\x00\xcc\x18\ \xf2\xf6\x60\x80\x89\x99\x75\x7b\xfb\x80\xaa\xa8\x28\x77\xdf\x72\ \x4b\x08\xb9\xb9\x22\xae\x63\x5f\x97\x24\x55\xc4\x8a\xfc\xfa\x87\ \x47\xee\xcd\x08\x79\x43\x42\x12\x49\x31\x04\x39\x0c\x90\x94\x82\ \xa4\x52\x24\x95\x41\x92\xf3\xf3\x04\xe7\xe7\x0a\xb7\x9b\xa4\x6d\ \x6b\x69\x00\xf2\x9c\x45\x5f\x69\xdb\xb7\x7f\xff\xd1\xe3\xc7\x8f\ \x76\xcc\x9a\x35\x13\xa1\x50\xce\xf2\x33\x26\x20\x14\x2a\x58\x5a\ \x51\x5e\x9e\x53\x5d\x5d\x8d\x9c\x43\x47\x5c\x01\xe9\x06\x84\x3a\ \x2c\x72\x43\x50\x5e\xf7\xcf\x96\xb4\xed\x98\x2e\x5c\x10\xf4\xda\ \x6b\x13\xf5\x87\x87\xc0\xb6\x3d\x40\x00\xd8\xb2\x98\x88\x34\x29\ \x15\x21\x66\xa0\xb3\x33\x0b\x00\x63\x60\x40\xf0\x25\x97\x2c\xa0\ \x09\x13\xde\x13\x75\x75\x3b\xe5\x2b\xaf\xd4\xfd\xd7\xd7\x46\x3d\ \x0a\xa0\x4c\x92\x80\x14\x82\x5e\xaa\x7f\xe9\x59\xc1\x2c\x89\x59\ \x0a\x66\x29\x3f\x51\x66\x5d\x76\xd9\xd8\xbc\x05\x0b\xa6\x65\x3e\ \xf8\xef\xe9\x59\x2f\xff\x32\x30\x6d\x62\xf6\x3b\x73\xa7\x66\xd4\ \x2c\x5c\x54\xf4\x63\xb7\x87\x64\xe5\xe6\x5e\xd7\xfd\xcf\xfc\xaa\ \x07\x50\x75\xeb\xd7\x6f\xfc\x68\xd2\xc4\x09\x28\x28\xc8\x5f\x32\ \xa5\x62\x66\xea\x19\x11\x30\xa5\xbc\x7c\x61\x61\x61\x21\xaa\xab\ \x6a\x3a\xf3\xa3\x31\xcd\x5e\xff\xda\x7b\x1f\x6d\x3d\x97\xc6\x8e\ \x69\x81\x6d\xb9\xdd\x7b\xf7\xdc\xba\x30\x0f\x21\xe1\xd8\x93\x74\ 
\x5f\x2f\x74\x75\xf5\xe9\xa1\xa0\x07\x43\x9e\x3e\x0e\x00\xa8\xa9\ \x59\xc4\xf7\xdf\x3f\x81\x2e\xb8\xe0\x49\x54\x56\x3e\x41\x44\x93\ \x90\x1c\x4f\xec\x19\xe1\x5c\x2b\x20\x20\x21\x07\x04\x49\xd3\x62\ \x67\xc9\xd7\x9f\x5b\xb0\xc8\x37\x6a\xd4\x8f\xfd\x79\x79\xab\x03\ \xf9\xf9\x6f\xa4\x7c\xe1\x0b\x65\x19\x85\x85\xcf\x1a\xdb\xb6\xed\ \x76\x1f\x3a\xbc\x35\xfd\xc1\x07\xb7\x67\xff\xdb\x4f\xfe\x95\xb4\ \x9e\x0b\xa2\x1c\x15\x8b\xde\xbe\xe8\xaa\xf2\x2f\x46\x54\xba\x70\ \x05\xa1\xcb\x66\xfd\x43\xe7\xc1\x43\x47\x1a\xbd\x1e\x37\x46\x8c\ \x18\x51\xc2\xe0\xf1\x67\x42\x40\xea\xb8\x71\x63\xcf\xf2\x79\xdd\ \xa8\xab\xaf\xaf\xb9\xba\xf0\xac\x8a\xbb\xbe\xb5\xe6\x2b\x17\xce\ \xf1\xfb\xe9\xea\xab\x5e\x40\x22\x01\x1c\x3d\xf2\xa5\x0c\x17\x4f\ \x11\x59\x99\x93\x60\x18\xd0\xa6\xb9\x3e\x0a\xd4\xb2\x94\x16\x01\ \xc0\x65\x97\x3d\xcf\x5a\x03\xfd\xfd\x73\xf0\xf2\xcb\xaf\xe8\xce\ \xce\x39\x60\xb6\xe1\xf7\xef\x71\x2a\xca\x57\x20\x10\x58\xb3\xbd\ \xdc\x03\x25\x14\x66\xd5\x53\x22\x27\xa6\x5a\x95\x50\x28\x38\x70\ \xea\x35\x8a\x46\xff\x99\xb4\x9e\x0b\xc7\x39\x5f\xed\xdd\xbb\x9d\ \x2c\xeb\x72\x92\xd2\xcd\x4a\x02\x82\x46\x8a\x58\xf4\x36\x28\x09\ \x18\x46\x1f\x2b\xa5\x3d\x3d\x6d\x4f\x9f\x7d\xff\xf5\x13\x3e\x3a\ \x0a\xd7\xe5\xff\x78\x5b\x67\x67\x4f\xb8\x39\x1c\x09\xf7\x8f\x29\ \x19\x4d\x86\x32\xce\x3a\x13\x02\x8a\x0a\x0a\x0a\x0a\xe2\xf1\x38\ \xfa\xfa\x23\x4d\x65\x63\xa6\x24\x82\xee\xa0\x18\xe1\x8d\xa4\x89\ \x7f\xba\xfa\x23\x06\x0d\x70\xc2\x02\x7e\xfe\xe0\x85\xb2\xb4\x74\ \x24\x79\x7d\x70\x46\x8d\xda\x17\x06\x06\x4c\xa0\x09\x96\xc5\x74\ \xed\xb5\xf5\x18\x39\xf2\x37\x24\x04\x88\x99\x01\x1c\xd0\xe7\x9d\ \x77\xb1\xd5\xd8\x78\xb9\x7e\x6f\xf3\xf6\x59\xbf\x1a\xb3\x55\x92\ \x84\x24\x89\x37\x7e\x11\xcd\x38\xeb\x94\x53\x2c\x49\x02\x4a\x81\ \xdd\xee\x5a\x6b\x4c\xc9\xb7\xd8\xe3\x39\xce\x52\x82\xfd\xfe\xdd\ \xd6\xec\xd9\x97\x47\xbf\xf9\xcd\xa9\xec\x4f\xd9\xca\x52\x82\xa5\ \x44\x64\xf6\xdc\x6b\x1c\x9f\x7f\x07\x0b\x89\xec\xa3\xbb\xbe\x16\ \x1d\x84\x31\xed\xec\x0b\x62\x19\xfe\xb4\x48\x4f\x77\x77\x5f\x5e\ 
\x6e\x2e\x3c\x1e\xf7\xe4\x33\xd9\x0b\x14\x17\x16\x14\xa4\xc6\x62\ \x51\x98\x96\xee\x2c\x2c\x3f\x2f\xee\x09\x15\x8a\x40\x56\x20\x05\ \x60\xa2\x92\x92\x0d\x7c\xe8\xd0\x65\xbc\x7f\xdf\x15\xd0\x5a\x8a\ \x71\x63\x41\x93\x27\x47\x88\x48\x0d\x02\x31\x18\x46\x8b\x1b\xc8\ \xa4\xd7\x5f\x7f\x0c\xc0\x7f\x6a\x20\x6a\x11\x85\x87\x13\x1e\x14\ \x8b\x51\x5f\xa2\x6f\x89\x24\x89\xa2\x6e\x0e\x7b\xb5\x0a\x8e\x6b\ \x23\xde\x3a\x45\xd2\xdb\xe7\xf8\x9a\xbe\xff\xf0\xd1\xf9\x51\x3b\ \x4a\xfe\x95\x6b\x37\xa0\xaf\x4f\x99\x37\xde\xd8\xe3\x30\xc3\x22\ \x82\x9d\x99\x79\xbb\xf7\x97\xbf\x5c\x61\x8e\x2e\xd9\xdb\xf0\xf4\ \x2b\x07\x8b\xae\xb9\xf2\x51\xff\xde\x3d\xe7\xa9\xd8\xc0\xb5\x97\ \x7c\x73\x66\x73\xfc\xf2\x2b\x56\xde\xd6\xdb\xf9\xb5\xc1\x8f\x4e\ \x7a\xb2\x46\x95\xc0\xe3\xf1\x8c\x06\x80\x0b\x16\x5f\x44\x9b\x36\ \xac\xe5\x3f\xc9\x02\x48\xa8\x5c\xaf\xcf\x6b\x24\x12\x96\x36\x5c\ \xbe\x78\x7e\xd1\x58\xf8\xfc\x5e\xe5\x75\xc1\x07\x40\xf3\x98\xb1\ \x27\x88\x19\x94\x48\x04\x11\x8f\x07\xc8\xe3\xd1\xae\x59\xb3\x72\ \xfd\xcc\x7e\x62\x56\x09\xc0\x89\x03\xdd\x0c\x58\x88\xc5\x94\x0e\ \x87\x35\x62\x31\x83\x92\xe4\x2a\x78\x3c\x52\x90\x48\x57\x24\x91\ \x55\x52\x71\x97\xb3\x62\xc5\x92\xc5\xd9\x0b\xae\x54\x24\x39\xec\ \x17\x85\x2f\xd6\xbf\x1c\xf2\x29\x9f\x74\xae\xb8\x22\x6a\xdf\x78\ \x63\x18\xb1\x98\x01\x22\x25\x98\x55\xec\xe6\x9b\xfb\x5b\xf7\xed\ \x7d\xaa\xed\xc5\x57\x3e\x14\xf1\x98\x51\xfd\xd4\x6b\x07\x22\xa5\ \xe5\x5f\x67\x69\xc0\xdb\xdf\xf9\xbd\xdc\xe7\x1e\xdb\x97\xe7\xf6\ \x14\x39\x91\x08\x82\xc1\x00\x5c\x86\x91\x05\x00\x7f\xaa\xf2\x00\ \x20\xfc\xfe\x94\x0c\xb7\xc7\x0d\xcb\x4a\x58\xfe\x94\x54\x27\x27\ \x54\x40\x6e\x97\x52\x00\xdc\x00\x69\x9c\x3b\xef\x24\xbb\xdd\x6d\ \xd0\x5a\x42\x6b\x62\xad\x41\x86\xe1\x4a\x01\x0a\xfc\x44\x01\x10\ \x49\x87\x19\x71\xa2\xb0\xe3\xf3\x0d\x38\xa9\xa9\x0e\x79\x3c\x12\ \x80\x24\x40\x82\x48\x0a\x08\x8f\x24\x89\x14\x23\x25\xa1\x7f\xfe\ \xf3\xda\x13\xd7\x5c\x70\x5a\x92\x24\x09\x09\x43\x18\x12\x43\xfd\ \x00\x48\xf2\x7a\xc5\xf0\x35\x01\x92\x40\x12\x04\x09\x9f\x57\x10\ 
\x41\x1e\x7e\x7a\xcd\xba\x78\x76\xde\x33\x90\x52\x68\x12\x50\xc5\ \xa3\x3f\x88\xe7\xe6\x55\x19\x86\x01\xa5\x54\xfa\x1f\xdb\xe1\xfe\ \x51\x02\xbc\x1e\xaf\x4f\x90\x80\x20\x61\x07\x52\xd3\xd8\xe7\xf7\ \x2b\xa9\x48\x32\x43\x00\xac\x29\x3f\x3f\x86\x50\xe8\x03\x00\x20\ \x22\xc0\x30\xfa\xb9\xa0\xa0\x17\x80\x70\x33\x67\xf9\x92\x49\x4e\ \xa5\x01\x99\x60\xb6\x98\x59\x82\x48\x31\xb3\x02\xb3\xba\xeb\xd0\ \x0f\xa6\x3b\xec\x4c\x36\x84\x91\x28\x0b\x96\xb6\x82\x59\x5d\x3d\ \xe2\xca\x3e\x45\x0a\x52\x28\x3c\x55\xfb\xec\xb7\x31\xd4\xf7\xff\ \x01\x92\xa0\xa1\x92\x29\x59\xee\x78\x6e\xd7\x83\x6d\x65\xe7\xdd\ \xd4\xba\x70\xf9\xa5\x9d\xb7\x7d\xf7\x79\x77\x30\x35\x9e\x8c\x43\ \x20\xfe\xc8\x0e\xf7\x8f\x13\x10\x37\xe3\xda\xb2\x2c\x28\xc3\x50\ \x01\xbf\xdf\xe7\xf3\xba\xdd\x04\x92\x5a\x83\x00\xd2\xcc\xcc\x08\ \x85\x1a\x58\xeb\x64\x02\xcc\xe5\x6a\xc7\x9c\x39\x5d\x4c\x04\x10\ \x91\x02\x82\x5e\x20\x4d\x02\x2e\x10\x19\x20\x52\x00\x14\x11\x29\ \x26\x52\x8d\xd1\xa6\xc9\x52\x28\xa4\xbb\xd2\x77\x7d\x67\xc2\xed\ \x8d\x18\x6a\xcf\xf3\xe6\xbd\xaa\x84\x42\xbf\x15\xbe\xf2\x81\x93\ \x0f\x97\x82\x48\x0d\xff\xed\xe3\x3a\xfe\x1b\x60\x52\xc4\xa4\x78\ \x08\x3b\xbe\xfd\xec\xee\xa3\xdf\x7a\xa8\x26\x3d\xdd\xef\x0e\x04\ \x03\x86\xed\x38\x70\x6c\x3d\x14\x87\x7d\x0a\x02\x22\x91\xb0\x19\ \x8d\x46\xe1\x76\xbb\x95\xcf\xef\x4d\xf1\xb8\x84\x1f\x80\xd2\x8c\ \x8f\x77\x75\x58\xb6\x6c\xef\xc7\xff\xe1\xf5\xf6\x12\x11\xc0\x4c\ \x00\x88\x93\x94\xbb\xdd\x40\x0a\x31\x4b\x06\x04\x03\x32\xa1\x13\ \x0a\x80\xec\xb7\xc2\xe3\x25\x49\xb8\x85\x3b\x3a\x94\x05\x96\x04\ \xc8\x2f\x16\x2e\x7f\x49\x92\xd4\x12\x12\x9b\xda\x37\xdf\x00\x40\ \x0e\xd8\x51\x63\xb8\x0f\x0f\x01\x0c\xc9\x0c\xa9\x87\xc0\x1a\x52\ \x6b\x48\xcd\x10\x92\xe0\xf7\x79\xa5\x37\x18\x08\x7a\x07\x06\x06\ \x61\xd9\x76\x04\x80\xf5\xe9\x96\x41\x76\x22\x9d\x1d\x9d\x09\xb7\ \xdb\x2d\x03\x7e\x4f\xba\xc7\x48\x64\x82\x21\x5b\xa2\x5d\xb2\x35\ \xd6\xe6\x21\xc7\x26\x9a\x32\xa5\x17\x86\xd1\xc7\x00\xa0\xb5\x4a\ \xce\x06\xa2\xe4\x66\x97\x09\xcc\x82\x01\xba\xe7\xe8\x4f\xce\xb9\ 
\xff\xc4\x03\xd3\x19\x50\x86\x70\x49\x02\x54\x9f\xd5\xbf\x58\x91\ \x44\x40\xa5\xf4\xf2\xb0\x63\x04\xd4\x0d\x25\x5f\x69\x2e\xf2\x16\ \xbe\xa6\x84\x44\xd8\x0a\x5f\xf8\x6e\xcb\xfa\x3c\xbf\xf2\x89\x84\ \xb6\x0c\x06\x94\x06\xd4\xc7\xa5\x4e\x42\x6b\x28\xcd\xc9\xd2\xb1\ \x21\x7c\x6e\x64\xb8\x15\x05\x52\xd3\x82\xc1\xae\xae\x2e\x24\x12\ \x66\xd7\x99\xc4\x01\x7d\xcd\xcd\x2d\x51\xb7\xcb\x0d\xbf\xcf\x9d\ \xe9\xf7\x20\x77\xad\x73\x5f\xf9\x2d\x87\x6f\xfe\xd5\x8f\x4f\xdc\ \xff\x6d\x28\x83\x99\x19\x34\x66\xcc\x46\x10\x01\x25\x25\x7b\x99\ \x99\x38\x69\x01\x82\x89\xc4\xc3\xd5\x4f\x4e\xff\xf2\xde\x9b\x6f\ \x7e\xaf\x63\xeb\x13\x6b\x5a\xde\xfd\xf5\x4d\xfb\x6e\xbb\x86\x87\ \x14\x20\x20\x4d\x91\xc2\xe4\xd4\xd2\x03\x34\xd4\x36\x8c\x37\xe7\ \xbe\xf2\x50\x9a\x2b\xfd\x80\x24\x85\x87\xab\x1e\x79\xec\xb1\xaa\ \x27\xca\xa5\x30\x24\x03\x2a\xe1\x58\x86\x06\x14\x0f\x29\xac\x9d\ \x21\xc5\x1d\x28\xc7\x86\x32\x13\x40\xc0\x87\x5c\xb7\x81\xa0\xdf\ \xe7\x0b\xb6\x77\x74\xc2\xb2\xac\x16\x00\x58\xbc\xf4\x62\xfa\x34\ \x71\x40\x77\x55\x75\x55\xa7\xed\x38\x69\xc1\x80\x37\xcf\x92\x5d\ \xa5\xb5\xb4\xe5\xf6\x80\xe9\x51\xdd\x89\x9e\x9c\x98\x13\x93\xef\ \xb4\x6e\x1a\xdb\x7c\x63\xc9\xa9\x11\xfe\xf9\xdf\xbc\x24\x6f\x59\ \x8d\xd6\x8e\x14\x52\x61\x73\xc7\xf6\x9c\x87\xab\x9f\xf8\x69\xd4\ \x8e\x16\xc5\xb5\x05\x83\x92\x79\x89\xaa\x48\xcd\xed\x0d\x03\x8d\ \xab\x7b\x12\x3d\x2e\x45\x49\xa7\x7c\xef\xa4\xef\xef\x1d\xb4\x63\ \x2e\xaf\xf4\x68\x1e\x0a\x12\x18\xc0\xda\x79\x2b\xbf\xb1\x6c\xdb\ \x25\xaf\x98\xda\x1c\xb3\xaa\xe9\xad\x5f\x55\x76\xbf\xbf\xea\xca\ \xe2\xab\x7e\x33\x3f\x77\x71\x9b\x05\xc6\xb3\x55\x8f\x97\x1b\xda\ \x1f\xbf\x28\xf3\xba\x93\xb6\x0d\x24\x2c\x8b\x12\x96\xa1\x05\xc1\ \x9d\x96\x82\x22\xb7\xa4\x4c\x29\x65\x6a\x4d\x4d\x2d\x00\x1c\x06\ \x80\x0d\xeb\xde\xe6\x4f\x43\x40\x4f\x5d\x5d\x5d\x6b\x67\x77\xcf\ \xd8\xf4\xd4\x40\xc6\x56\xb3\xf2\x8b\x19\xae\x0c\x15\xb6\x22\x30\ \x1d\x1b\x37\x1f\xb8\xe3\xc1\x88\x1d\x29\xd2\xac\xa1\xa1\xd1\x15\ \xef\xbc\xe7\x86\x51\x5f\x3e\xd0\x14\x6d\xf6\x3f\x5a\xfd\xd4\xcf\ 
\x12\x3a\x51\x20\x85\x32\x15\x98\x97\xe5\x2e\xba\x7b\x7d\xeb\x86\ \xff\x00\x58\xac\x69\x7b\x77\xb2\x0b\x86\xad\x84\x01\x49\x22\xaa\ \x89\x0c\x45\x8a\x40\xc4\x60\x86\x4e\xfa\x11\x30\x11\x2a\xd2\xcb\ \x5f\x39\xd0\xfb\xe1\x9d\x0c\x46\xa7\xd9\x7d\xe9\x93\x55\x4f\xcc\ \x7f\xbd\xf1\xf5\x5d\x83\x76\x34\xd4\x67\x86\xcf\xd2\x9a\x11\x8d\ \x39\xff\xbc\x34\x78\xfd\x11\x76\x5c\xe8\x8c\x76\xd1\xa0\xaf\xb1\ \x28\xc5\x35\x2d\xd7\xad\x8c\xcc\x48\x24\xea\x6b\x3c\x7d\x1a\x86\ \x61\x1c\x3c\x93\x48\xb0\xad\xb6\xb6\xe6\xf8\x91\x23\x47\xcf\x9d\ \x3f\x6f\x4e\x5a\x7a\x47\x53\xfa\xc4\xd4\x71\x83\x75\xed\xe6\x33\ \x83\x66\xcb\xed\x2e\x11\x2d\x22\x12\xf0\x49\x6f\x73\xd4\x89\x16\ \xbc\xdd\xb2\xfe\x5e\x25\xdc\xdf\xdf\xd8\xb1\x65\x85\xc5\x76\x01\ \x91\xc4\x35\x23\x56\x7c\xe7\xe2\x11\x97\x9f\x20\x00\x5b\xda\xb7\ \xc6\x35\x3b\xbe\x83\xbd\x87\xcf\x4f\x35\x82\xad\x4a\x48\x10\xc8\ \x85\xa4\xd9\x93\x1e\xf2\xd2\xc3\x43\x94\xd0\x36\xfd\x64\xca\x8f\ \xd7\x3b\x44\xeb\x6f\xd9\x7b\xcb\x1d\xcd\xf1\x96\x0b\x19\x48\xeb\ \x34\x3b\x2f\x62\x06\x04\x24\x88\x80\xed\x5d\xef\xfc\x07\xc7\x7d\ \xdf\xdd\xd6\xbb\xfa\x8e\xde\x78\xff\xe8\x69\xe9\x53\x74\xa7\x93\ \x71\x30\xc7\x5f\xe8\x3f\x70\xe0\x30\xda\xda\xda\x9b\x53\xd3\x52\ \x0f\x9e\x89\x0f\x18\x18\x1c\x08\x9f\x78\x7f\xef\x3e\xd3\xe3\xf3\ \x51\x91\x4c\xef\xbf\xb6\xf8\x9a\x03\xd7\xf8\x1e\x58\xe5\xb1\xf3\ \xdf\x65\x22\xcc\xcb\x39\xf7\xa1\x67\x67\x3d\xfd\x8d\xa0\x91\x76\ \x1c\x82\xc4\x1b\xcd\x6f\xff\xac\x3f\x11\x2e\x67\x22\x4c\x49\x2d\ \x7b\x7e\x79\xd1\x65\xa7\x4c\x6d\x1b\x9a\x59\x65\xb9\xb3\x0e\x4a\ \xa1\xe0\x95\x5e\x33\x62\x0f\x14\x4b\x52\x08\xa8\xc0\x69\xcd\xac\ \x78\x28\x3e\x18\x2e\x35\x91\x12\x24\xa5\xc9\x8e\xa1\x99\xd5\x23\ \x33\xff\xeb\x91\x8b\xf2\x2f\xfe\x97\x5c\x6f\xfe\xb6\x7c\x6f\xd1\ \x86\x31\x29\xa5\x2f\xce\xcb\x5a\x72\xaf\x9b\x7c\x1d\x00\xf9\xb6\ \xf5\xbf\xf9\xb0\xe5\x58\xa3\x05\x04\x66\x64\x4d\x15\x2d\x66\xed\ \x34\x13\xf6\xc8\xfd\x07\x3e\x40\x2c\x16\xff\x60\xf5\xaa\x37\x3a\ \x3e\xb5\x05\xe4\xe5\x17\x86\x5b\x5b\x9a\x8e\xee\xde\xbd\xfb\x44\ 
\x43\x43\x53\x45\x49\x66\x21\x79\x52\xf2\xfd\xbe\x12\x4c\x49\x7c\ \xf4\xc0\xc3\x17\xcd\xe0\x5f\x65\xa6\x93\x88\x33\x8b\x8b\x0a\x2e\ \x79\x6e\x55\xf3\xca\x5b\x23\x56\xa4\xd8\xab\xbc\xad\xd3\x32\xa6\ \xfe\xf6\xb6\xb1\xb7\x6e\xb7\x99\x95\x33\x74\x44\x36\xd2\x5f\x7c\ \xac\xdf\xea\x3f\xbb\x2d\xde\x7e\x3e\x83\xa5\x41\x0a\x97\x15\x2e\ \x7f\x9c\x01\x43\x92\x44\x72\xa5\xe6\xe4\x79\x39\x27\xd7\x5a\x22\ \x39\x9c\x43\xc7\xd5\x63\xbe\x7a\xf4\x4a\xfe\xea\xd1\x84\x05\x98\ \x26\x10\x8f\x01\xf5\xfd\x8d\xbb\x1a\x13\x75\xcb\xa1\x49\x69\x47\ \xf7\x5c\x9b\x75\xd7\xcb\x17\x15\x95\x7c\xc1\xa2\xae\x91\xe1\xae\ \xb0\x7b\xdf\xfe\xfd\x70\xb4\xf3\xdb\x33\x49\x88\xc8\x81\x48\x98\ \x01\xb8\x62\xd1\xf8\x88\xc2\xa2\x11\xd3\xe7\x9e\x73\xb6\x3b\x1e\ \xeb\xb7\xd3\x32\x03\xde\x93\x0d\xd6\xe1\xa8\xc9\x94\x97\x2d\x0c\ \xd3\xd1\x62\x54\x60\x6c\xdf\xd2\xc2\x8b\x36\x9a\xda\xac\xb9\x61\ \xec\xd7\x5e\x9a\x9b\x73\x5e\x9d\xa5\x6d\xc1\x24\x05\x0b\x41\x00\ \x44\x73\xf4\xb4\xaf\x76\xa0\x76\x3e\x83\xbd\x00\x3c\x02\x02\xe3\ \x53\x27\x6c\x2f\x4d\x2d\xed\xd6\x44\x82\x01\xa1\x89\x84\x1e\x5a\ \x41\x34\x20\x1c\x90\x70\x08\xc2\x21\x12\x16\x20\xe2\xb6\x96\x66\ \x42\x4b\xc7\x92\x22\x1e\x87\x28\x73\x2d\x3c\x7c\xa0\x7f\xc7\xb8\ \x98\x15\x0f\x8d\xd3\x17\x3c\x78\x53\xd9\xf2\x09\xa5\xa1\x14\x4f\ \xd0\x08\x04\xb6\xbe\xb7\xdd\xf5\xde\xe6\xad\x9d\x83\x83\x83\x5f\ \x6f\x6f\x6d\x4e\x7c\x6a\x02\x86\xcf\x1e\x4c\x33\x96\x96\x48\xd8\ \xe5\xe7\x9e\x77\x6e\xba\xc7\x25\x8d\xb4\xd4\x40\x86\xcb\x2d\xa3\ \x87\x4e\x8a\xea\x40\x0a\x79\x7c\x1e\xa2\x84\x4d\xc2\x61\xd0\x84\ \xd4\xb2\x76\x97\xf2\x69\x0d\x08\x16\x92\x34\x41\x38\x60\x01\x86\ \xcc\xf2\xe4\x0c\xbc\xdf\xb5\x67\xb6\x66\x0e\x48\x92\x10\x24\x31\ \x21\x75\xe2\x8e\x71\xc1\x09\xdd\xc3\xc1\x8d\x03\x48\x0d\x48\x4d\ \x90\xf6\xd0\xb5\x4d\x90\x36\x43\x5a\x0e\xa4\x93\x20\x61\x25\x84\ \x30\xe3\x90\xf1\x18\x64\x3c\x4e\x62\x32\x2e\xdc\x35\x66\xe0\xb2\ \x17\x96\x8c\x9e\x36\xb1\x74\x04\x9d\xef\x33\xc8\x48\x44\xad\xcc\ \x5f\x3c\xf4\x08\xf5\xf5\xf7\x3f\xfc\xfe\x9e\xed\xeb\x96\x5e\x78\ 
\x09\xd5\x54\x9f\x3c\xa3\x9c\x60\x02\xc0\xb1\x43\x87\x0f\x6d\x59\ \xb5\x6a\x35\x02\xc1\x34\x5f\x22\xd2\xed\x19\x5b\x48\x97\x17\xe6\ \xd2\xc8\x63\xd5\xb0\xfa\xc2\x90\x31\x13\x32\x16\x83\x8a\xc5\x49\ \x9a\x26\x94\x69\x41\x99\x9a\x55\x82\x59\x59\x0c\x65\x11\x54\xa6\ \x37\x64\x05\x5c\x69\xad\x62\x28\xa2\xf5\x1b\x81\xd6\x73\x73\xe6\ \xd7\x39\x80\x72\x00\x65\x7f\xa2\xb4\x01\x65\xd3\x10\x18\x2a\x61\ \x43\x25\x4c\x28\x33\x31\x04\x13\xca\x8c\x43\x99\x31\x18\x7d\x61\ \x5b\xfb\xbd\xce\xc8\x31\x05\x74\x79\x7a\xf2\x2c\x39\x7b\xcd\xda\ \x77\x44\x5d\x7d\x43\x6f\x20\x25\xe5\x19\x00\x58\xf7\xce\x5b\x7c\ \x46\x29\x31\x00\x50\x86\xaa\xea\xef\xed\xda\xb4\x6a\xd5\x5b\xb5\ \x1f\x7c\x70\x10\x42\xc8\x74\x2f\xe2\xf9\x33\x27\xe2\xbb\x60\x4e\ \x39\x56\x4d\x3c\x30\x00\x39\x18\x25\x39\x18\x65\x35\x18\x83\x8a\ \xc6\xa0\x62\x71\xa8\xb8\x09\x95\xb0\xe1\x4a\x30\xb9\x12\x4c\x2e\ \xb7\xf2\x27\x94\x74\x41\x09\x17\x52\x5d\x19\x6d\x3e\x57\x3a\x2c\ \x90\xcb\x02\xb9\x12\x20\x57\x82\xc8\x65\xd1\x50\x9d\xc9\x65\x3a\ \xe4\x32\xe3\x49\xc4\x87\x10\x8b\x92\x2b\x16\x23\x57\x2c\x4a\x46\ \x38\x4c\x04\x47\xa5\x4c\x9b\xa0\xee\x1c\x91\xc5\xb9\x00\xd2\xeb\ \xeb\x1b\x7c\xaf\xbf\xf1\x26\x62\xf1\xd8\xcf\xd7\xac\x7e\xb3\xe1\ \x4c\xd3\xe2\xc3\x53\x00\x5a\x6b\x13\x80\x1d\x09\x47\x02\xd1\x58\ \x62\xe6\xc2\x85\x0b\x04\x3b\x36\x32\xd3\xbd\xe9\x6e\x1f\x8d\x38\ \x52\xcd\x3b\x1c\x87\x94\xc7\xc5\xca\x76\x88\x1c\x0d\xa1\x87\x60\ \x6b\x12\xcc\x48\x82\x20\xaa\xc3\xc7\xf3\xba\xcc\xae\x89\x24\x08\ \x0b\xf2\x96\xbc\x50\x14\x18\xd5\x9b\x9c\xe3\x10\x9a\x20\x1c\x86\ \xb0\x19\xc2\xd6\x10\xb6\x05\x61\x25\x92\x48\x58\x10\x89\x38\x44\ \xc2\x84\x88\xc7\x48\xc4\xa3\x90\x91\x08\xd0\xdd\x8b\xc4\xb4\x52\ \xdc\x31\x69\x04\x4f\xf4\x2a\x1a\x7e\x4f\x89\x8e\x1d\x3d\x56\xfb\ \xfe\x9e\xed\x5f\x06\x60\x7f\x66\x02\x86\xa4\x2b\x91\x88\xcb\x8e\ \x8e\x8e\x54\x12\x72\xfc\x9c\xb3\xe7\x80\x2d\x93\x72\xb2\x3c\x79\ \x7e\x1f\x15\xd5\x34\x62\x7f\xcc\x24\x56\x12\xd2\x71\x88\xec\x21\ \x68\x0d\x62\x4d\xe4\x68\x22\xad\x89\xd2\x5d\xd9\x9d\x5d\x66\x87\ 
\x9c\x99\x39\x6f\xe5\xdc\x9c\x45\x55\x09\xad\x85\x66\x41\xb6\x4e\ \xf6\xb7\xec\x21\x24\x92\x48\x58\x44\x71\x93\x28\x11\x27\x8a\xc7\ \x89\x62\x51\xa2\xe8\x20\x28\x12\x81\x13\x1d\x04\x57\x8c\xc7\x3f\ \x4f\x19\x85\x19\xa9\x6e\x02\x00\xf1\xfc\x0b\x2f\xd1\xca\x95\x6f\ \x41\x10\x6e\x6a\x6c\xac\x3f\xf2\x59\xce\x06\xe5\x1f\x68\xeb\x1e\ \x18\x88\x38\xbd\x7d\xe1\x89\x1e\x8f\x2f\xeb\xac\x19\xd3\x61\xc7\ \xa3\x08\x65\xba\x0b\xbd\x7e\x4c\xa8\x3d\xcd\xfb\x23\x83\x14\x95\ \x92\x95\xe3\x10\x39\x0e\x27\x77\xca\x3a\x79\xfa\xa9\x35\xe0\x17\ \x69\xf1\x8a\xd4\xd9\x47\x0b\xbd\xa3\xbb\x12\xb6\x43\x70\x64\xf2\ \x00\xd9\x06\x6c\x2b\x09\x2b\x91\x44\x22\x01\x98\xf1\xe4\x92\x67\ \xc6\x81\x78\x8c\x28\x16\x65\xee\xed\xa3\x78\x34\x0a\xdf\xd4\x09\ \xf8\x4e\xd9\x28\xae\xc8\xf0\x26\xc3\xfb\x77\xd7\x6d\xe0\x27\x9f\ \x7a\x86\x72\xb2\x32\x8e\xac\x7b\x77\xcd\x37\x3e\xeb\xe9\xf0\x1f\ \x22\x20\x06\xa0\xab\xa3\xbd\x2d\xd1\xd2\xd6\x31\x31\x25\x25\x90\ \x5a\x36\xb9\x94\x61\x27\x28\x94\xe9\xca\x4a\x4b\xa5\x05\x03\x51\ \x3e\xd5\xd4\x46\x8d\xcc\x70\x69\x4d\xe4\x38\x04\xc7\x21\xb2\xed\ \xe4\x08\x3b\xf6\x50\xdd\x22\xd2\xb6\xc0\x70\xdd\x4a\x10\x25\x4c\ \xa2\x44\x82\xc8\x34\x93\xf5\x78\x8c\xc8\x8c\x13\xc5\x62\x84\x78\ \x94\x68\x70\x00\xdc\xd9\x85\x48\x9a\x1f\x65\xd3\x26\xd2\x3d\x65\ \xc5\xc8\x1f\x1a\x79\xde\xbc\x65\x1b\x1e\x7f\xfc\xbf\x30\x73\xd6\ \x1c\x2a\xcc\xcb\x0a\x5d\x76\xe9\xc5\xe3\xde\x79\x77\xfd\x9b\x9f\ \x37\x01\x00\x10\x01\x38\xda\xd5\xd9\x39\xd8\xd6\xd1\x39\x45\x28\ \xc3\x57\x51\x51\x0e\xc9\x9a\xb3\x53\xa5\x2b\x3d\x8d\xe6\x29\x83\ \x33\xda\x3a\xe9\x78\x6f\x18\x03\x9a\x99\x98\x49\x68\x1b\xa4\x1d\ \xc0\xb2\x19\x8e\x43\xb0\x2d\x86\x6d\x11\x2c\x8b\x61\x25\x92\x65\ \x22\x41\x48\x98\x8c\x84\x49\x88\x9b\x0c\x33\x4e\x88\x47\x99\x07\ \x06\xc8\xea\xea\xe1\x98\x19\x27\x4f\x49\x21\xae\x2d\x1f\x4b\xd7\ \x8e\xcf\x67\x97\x57\x11\x03\xa0\x75\xeb\x37\xd2\xf3\xcf\xbf\x80\ \xe9\xd3\xcb\x51\x5e\x36\x09\xc1\xa0\x9f\x8f\x1e\x3d\x36\x65\xe5\ \x9b\xaf\x8f\x7a\xe0\xc1\x87\x56\x7d\xde\x04\x00\x40\xbb\xd6\x4e\ 
\x67\x7b\x5b\x6b\xb8\xbe\xa1\x69\x5c\x24\x32\x10\x2c\x2d\x9d\x00\ \xbf\xc7\x83\x80\xdb\xa1\x50\x86\x1c\x9d\x99\xce\x8b\x94\x02\x22\ \xfd\xd4\xd7\xd3\x87\x2e\xc7\x01\x6c\x0b\xca\xb2\x88\x12\x09\xc0\ \x4a\x50\xd2\xc4\x13\x94\x34\xf1\x18\x21\x1e\x07\xe2\x71\x42\x2c\ \x0a\x8a\xc7\x08\x03\x03\x30\x7b\x7a\x28\x0a\xcd\xb9\x85\xd9\xb4\ \x70\xca\x58\x7c\x6b\x72\x31\xc6\x15\xa6\x01\x92\x88\x1d\xc7\xc1\ \x4b\x2f\xbf\x8a\xb7\x56\xfd\x0e\xa1\x9c\x4c\x32\x0c\x83\x02\x29\ \x5e\x94\x8c\x1e\x45\x39\x39\x21\xda\xb7\x7f\x7f\xc5\x15\x97\x5d\ \x36\x71\xd5\x5b\x6f\xbf\xf1\xb9\xbd\x22\xf3\x09\x39\x91\x48\x98\ \x6f\x1c\x39\x7c\x60\x50\x6b\xe7\xd6\xba\xfa\x86\x31\xb7\xde\x7c\ \x23\xca\xa7\x4c\x66\x9f\x02\xc6\x87\xc8\x13\x4a\xc5\x8a\x8e\x3c\ \x7c\xa1\xb5\x9b\x8f\xb5\x76\xd1\xee\xee\x3e\x1c\xb7\x35\xe2\x20\ \xb0\x94\xac\xa5\x24\xa6\xe4\x20\x82\x99\x91\x9c\x32\x2c\xb4\x26\ \x92\x04\x4f\x5a\x80\xa7\x8d\x1f\x45\x67\xe7\x67\xd0\xa4\x50\x3a\ \xfb\xb3\x53\x08\x94\x4c\x34\x51\x5b\x5b\x3b\x3d\xf6\xc4\x53\xd8\ \xbf\x6f\x1f\xa6\x55\x94\xc1\xe3\x75\x73\x4b\x73\x0b\xb4\xe3\x20\ \x1c\x8e\x60\x52\x69\x29\xb7\xb4\xb4\xf0\xb1\xe3\xc7\xbf\xd4\xd9\ \xde\x64\x66\x87\x0a\x3f\xf5\x31\xf9\x9f\x9a\x38\x48\x07\x70\x61\ \x7e\x41\xf1\x55\x93\xcb\xca\x96\x5e\x74\xe1\x52\x71\xe5\x17\x2f\ \xe7\xdc\xdc\xd0\xd0\x3d\x34\xe2\x0e\x21\x1c\x23\xf4\x44\x18\xfd\ \x51\xd4\x46\x06\xe9\x94\x99\x40\x57\xc2\x46\x44\x6b\x24\x92\x49\ \x55\xb8\x0c\x89\x80\xcb\x8d\xac\x14\x0f\x8f\x0e\xfa\xa9\x24\x23\ \xc0\x48\xf5\x01\x29\xc6\xc7\x8f\xc3\xd1\x68\x0c\x6f\xaf\x5e\x4b\ \x6f\xae\x7c\x0b\xb5\xa7\x6a\x07\xd8\xb1\x5e\xbb\xed\xd6\x9b\x2f\ \x9e\x3e\x7d\x7a\xd6\x8e\x1d\x3b\xf9\xe4\xc9\x13\x94\x93\x9d\x8d\ \x89\x13\xc7\x73\x51\x61\x21\xfa\xc3\x61\xaa\xa9\xae\x85\xcf\xef\ \x7d\xf5\xab\xd7\xdf\xb4\xe2\xf3\xb4\x80\x61\xe9\x05\xb0\xb2\xa5\ \xb9\xbe\x3e\x1e\x8f\x9e\x6c\x6d\x6d\x59\xfe\xfe\xde\x7d\xc5\x17\ \x9c\xbf\x00\x4b\x97\x2c\x46\x5e\x5e\x2e\x3c\x12\xec\x49\x01\x72\ \x52\x08\x0e\x50\x62\xda\x28\x31\x2d\xc0\xb4\x19\x5a\x27\xd3\x1f\ 
\x82\x00\x43\x11\xdc\x0a\xf0\x18\x04\x45\xe0\x4f\x8e\xc1\xc0\xc0\ \x20\xbd\xb7\x79\x0b\xad\x5b\xb7\x11\x47\x8f\x1d\x43\x38\xdc\xf7\ \x7e\x6d\xf5\xc9\x1f\x0e\x0e\x0e\xac\x2f\x2b\x2b\x9b\x1b\x08\x04\ \x36\x8c\x1f\x3f\xce\x6b\xdb\x16\xd7\xd6\xd6\x02\x44\xe8\xe8\xe8\ \xc2\xd4\xa9\x53\xd8\xe5\x56\xdc\xd0\xd8\xf8\xa5\x53\x35\x1f\xc5\ \x66\x9f\xb3\xe0\x07\x1d\xed\xad\x1d\x7f\x4a\x7e\xf0\x4c\xde\xb3\ \xcb\x05\x30\x2b\x2b\x27\xf7\x8a\xfc\xbc\xc2\xb9\xa3\x47\x8f\x2e\ \x9e\x7e\xd6\x34\xcc\x9b\x7b\x0e\xc6\x8d\x1d\xc3\xa1\x50\x0e\x84\ \x10\xbf\x7f\xff\x4f\x26\x81\xf0\xc9\x7a\x6f\x6f\x1f\x4e\x9d\xaa\ \xc7\xae\xdd\xbb\x69\xdf\xbe\x03\xa8\x3d\x75\xca\x09\xf7\xf7\x55\ \xb5\xb6\x36\x3d\xde\xd3\xdd\xf5\xf2\x27\xbf\x23\xd8\xb7\x6f\xdf\ \x4c\xc3\x30\xb6\x4a\x29\xbd\xef\xbd\xf7\x1e\x57\x55\x55\x51\x7e\ \x5e\x2e\xc6\x8d\x1b\xcb\xa3\x8a\x47\xe2\xc8\xd1\x63\x74\xec\xf8\ \x09\xf4\xf7\x87\x61\x9a\xe6\x91\xd6\xd6\xd6\x5f\x6c\xda\xb4\xe9\ \xb9\x3f\xd7\xf7\x02\x45\x00\x66\xe7\xe6\x15\x2c\x09\xa6\x66\xcc\ \x0c\x85\x72\x26\xe6\xe5\xe5\xab\x92\xd1\xc5\x28\x1e\x59\x8c\x82\ \xc2\x7c\x84\x72\x72\x90\x9a\x1a\x84\x52\x0a\x20\x82\xe3\x38\x18\ \x18\x18\x44\x57\x67\x27\x9a\x9a\x5b\xd0\xd0\xd0\x88\x53\xa7\xea\ \xd0\xda\xd6\x8a\x9e\xee\xde\xfe\xe8\x60\xe4\x60\x4f\x4f\xd7\x1b\ \xed\xed\xad\xab\x01\xfc\x5f\xe1\xed\x87\x1f\x7e\x28\xa6\x4e\x9d\ \xaa\xab\xaa\xaa\xce\x75\x1c\x67\x5d\x5d\x5d\x9d\xf7\xe4\xc9\x93\ \x5c\x5d\x5d\x8d\x91\x23\x47\xa0\xa6\xa6\x1a\x27\x4f\x56\xd3\xdc\ \xb9\x73\x79\xd9\xb2\x65\xfa\x83\x0f\x3e\x90\x3b\x77\xee\x44\x7b\ \x7b\xfb\x9d\xdb\xb7\x6f\x7f\xe0\xcf\xf9\xc1\x44\x00\x40\x29\x80\ \xb2\x8c\xac\x9c\xa9\xa9\xc1\xf4\x69\x5e\x9f\x2f\x94\x12\x48\x49\ \x77\xbb\xdc\x1e\x97\xe1\x52\x86\xa1\x04\x09\x21\x6c\xdb\xd6\x09\ \xd3\xd4\x96\x63\xd9\xb1\x68\x7c\xd0\x34\xe3\x3d\xb1\x68\xf4\xe8\ \x40\xa4\x7f\x7f\x67\x67\xfb\x4e\x00\xfb\x87\x5e\xb1\xff\x1f\xa5\ \xb2\xb2\x72\x86\xd7\xeb\xdd\x26\x84\xf0\x6e\xdb\xb6\x8d\xb7\x6f\ \xdf\x4e\xab\x56\xad\x82\xd7\xeb\xe5\x3b\xee\xb8\x83\x2e\xbd\xf4\ 
\x52\x8e\x44\x22\xa8\xac\xac\xa4\x5d\xbb\x76\xa1\xb7\xb7\xf7\xee\ \xad\x5b\xb7\xde\xf7\x59\x7c\xc0\xff\x24\x11\x00\xef\x03\x78\xbf\ \xa7\xab\xc3\xdf\xd3\xd5\x91\x0d\x20\x37\x25\x10\x1c\x61\x28\x95\ \x03\x12\x39\x86\xcb\x08\x28\x65\x48\xdb\xb2\x07\x1c\xdb\xea\x26\ \x81\xb6\x81\xc1\xe8\xe9\x78\x74\xb0\x09\x40\xeb\x50\xf0\xf5\x27\ \xc9\xa1\x43\x87\x44\x79\x79\xf9\xbe\xea\xea\xea\x25\xcc\xbc\x2e\ \x27\x27\xc7\xa7\xb5\xe6\xe2\xe2\x62\x6a\x68\x68\xc0\xaf\x7f\xfd\ \x6b\x04\x83\x41\xcc\x9c\x39\x13\xd3\xa7\x4f\x67\xd3\x34\xa9\xb2\ \xb2\xf2\x27\xb3\x67\xcf\x8e\x57\x56\x56\x3e\xf0\xe7\xb0\x80\xbf\ \x9a\xd4\xd5\xd5\xcd\x5e\xbb\x76\xed\xb6\xfa\xfa\x7a\x57\x5d\x5d\ \x1d\x4e\x9c\x38\xc1\x55\x55\x55\x94\x9e\x9e\xce\x77\xdd\x75\x17\ \x4d\x99\x32\x85\xb5\xd6\xd8\xbb\x77\x2f\xed\xde\xbd\x1b\x7d\x7d\ \x7d\x77\x57\x56\x56\xde\xf7\xa7\x06\x42\x9f\x9b\x18\x86\x8b\x5c\ \x2e\x17\xd9\xb6\xfd\xb9\xdd\xf3\xd8\xb1\x63\x34\x6e\xdc\xb8\xa6\ \x19\x33\x66\x9c\xef\xf1\x78\x8a\x3d\x1e\x0f\x0b\x21\x60\xdb\x36\ \xb5\xb5\xb5\xe1\xd0\xa1\x43\x54\x54\x54\x84\xdc\xdc\x5c\xe4\xe4\ \xe4\xc0\x71\x1c\xea\xec\xec\x5c\x18\x0c\x06\x07\xdb\xda\xda\x76\ \xff\x45\x09\xd0\xda\xc1\xe7\xa9\x3c\x00\x3c\xf1\xc4\x13\x00\x80\ \x05\x0b\x16\xa4\x32\xf3\x32\xa5\x14\xf9\xfd\x7e\x1a\x7a\x6d\x07\ \xa7\x4f\x9f\xa6\x83\x07\x0f\xa2\xa0\xa0\x80\xb2\xb2\xb2\x90\x9b\ \x9b\x0b\x00\xd4\xdd\xdd\xbd\x38\x3d\x3d\xdd\x69\x6b\x6b\xdb\xfe\ \x17\x23\xe0\xcf\x29\xe7\x9f\x7f\xfe\x11\xc7\x71\x66\x38\x8e\x53\ \x22\xa5\x64\xb7\xdb\x8d\x4f\x5a\xc2\xf1\xe3\xc7\x29\x3f\x3f\x1f\ \x39\x39\x39\xc8\xca\xca\x82\x6d\xdb\xd4\xdd\xdd\xbd\xd0\xef\xf7\ \x0f\x74\x75\x75\xed\xf9\x9b\x27\x60\xcb\x96\x2d\xd6\xb6\x6d\xdb\ \x5e\x9c\x37\x6f\xde\x39\x8e\xe3\x94\x28\xa5\xe0\xf3\xf9\x68\x38\ \x16\x69\x6e\x6e\xa6\xc3\x87\x0f\x23\x37\x37\x97\x32\x32\x32\x90\ \x9d\x9d\x0d\x00\xd4\xd3\xd3\xb3\x38\x25\x25\x25\xf1\x37\x4f\xc0\ \xb0\xcc\x9d\x3b\xf7\x75\x00\xb3\x6c\xdb\x2e\x11\x42\xe0\x93\x3e\ \xa1\xb3\xb3\x13\x27\x4e\x9c\xa0\x50\x28\x84\xac\xac\x2c\xa4\xa5\ 
\xa5\x41\x4a\x49\xbd\xbd\xbd\x0b\xff\x6e\x08\xd8\xb9\x73\xa7\xbd\ \x73\xe7\xce\x17\x37\x6c\xd8\x70\xf6\xd0\x74\x20\xaf\xd7\xfb\xf1\ \x2a\xd7\xd6\xd6\x46\x27\x4e\x9c\x40\x76\x76\x36\xa5\xa5\xa5\x21\ \x23\x23\x03\x86\x61\x90\xc0\xdf\x91\x10\x11\xa7\xa4\xa4\x5c\x4a\ \x44\x9b\x98\x19\x2e\x97\x0b\xf9\xf9\xf9\x5c\x58\x58\x88\x82\x82\ \x02\x0c\x0c\x0c\xe0\xd5\x57\x5f\xc5\xf1\xe3\xc7\x61\x59\x16\xe5\ \xe7\xe7\xb3\xc4\xdf\x99\x6c\xdb\xb6\xcd\x7a\xfa\xe9\xa7\x5f\x3a\ \x70\xe0\xc0\xd9\xb6\x6d\x7f\x6c\x09\xc3\xab\x43\x47\x47\x07\xd5\ \xd4\xd4\x20\x23\x23\x83\x02\x81\x00\x04\xfe\x0e\xa5\xbc\xbc\x5c\ \x67\x64\x64\x5c\x4a\x44\x1b\x1c\xc7\x81\x61\x18\x08\x85\x42\x5c\ \x50\x50\x80\xdc\xdc\x5c\x0c\x0e\x0e\x62\xf5\xea\xd5\xa8\xae\xae\ \xfe\xdb\x8e\x04\xff\x7f\xf2\xfe\xfb\xef\x8b\xa7\x9e\x7a\xea\x5d\ \xd3\x34\x17\x3b\x8e\x83\x78\x3c\x8e\xe6\xe6\x66\xb4\xb4\xb4\x70\ \x4b\x4b\x0b\x05\x83\x41\x16\x7f\xcf\x04\xcc\x9a\x35\x4b\x67\x66\ \x66\x2e\x57\x4a\xbd\xc3\xcc\x50\x4a\x71\x56\x56\x16\x42\xa1\x10\ \x42\xa1\x10\x62\xb1\x18\xfe\x0f\xc8\x02\xa9\x53\xd8\x3e\xc3\xb8\ \x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\ " qt_resource_name = "\ \x00\x0b\ \x0f\x08\x42\x1e\ \x00\x41\ \x00\x70\x00\x70\x00\x6c\x00\x69\x00\x63\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\ \x00\x09\ \x06\xbc\x0c\x53\ \x00\x50\ \x00\x6c\x00\x6f\x00\x74\x00\x74\x00\x6f\x00\x6f\x00\x6c\x00\x73\ \x00\x04\ \x00\x04\xf6\x35\ \x00\x48\ \x00\x6f\x00\x6d\x00\x65\ \x00\x04\ \x00\x05\x37\xfe\ \x00\x4d\ \x00\x61\x00\x69\x00\x6e\ " qt_resource_struct = "\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\ \x00\x00\x00\x1c\x00\x02\x00\x00\x00\x01\x00\x00\x00\x04\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\ \x00\x00\x00\x42\x00\x00\x00\x00\x00\x01\x00\x00\x0e\x20\ \x00\x00\x00\x34\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ " def qInitResources(): QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(0x01, qt_resource_struct, 
qt_resource_name, qt_resource_data) qInitResources() spykeutils-0.4.3/spykeutils/plot/__init__.py0000644000175000017500000000250212664623646017401 0ustar robrob""" This package contains various plotting functions for neo objects. The plots are created using :mod:`guiqwt` - if it is not installed, this package can not be used. .. automodule:: spykeutils.plot.rasterplot :members: .. automodule:: spykeutils.plot.correlogram :members: .. automodule:: spykeutils.plot.interspike_intervals :members: .. automodule:: spykeutils.plot.peri_stimulus_histogram :members: .. automodule:: spykeutils.plot.sde :members: .. automodule:: spykeutils.plot.analog_signals :members: .. automodule:: spykeutils.plot.spike_amp_hist :members: .. automodule:: spykeutils.plot.spike_waveforms :members: :mod:`dialog` Module -------------------- .. automodule:: spykeutils.plot.dialog :members: :show-inheritance: :mod:`helper` Module -------------------- .. automodule:: spykeutils.plot.helper :members: :mod:`guiqwt_tools` Module -------------------------- .. automodule:: spykeutils.plot.guiqwt_tools :members: :show-inheritance: """ from interspike_intervals import isi from dialog import PlotDialog from rasterplot import raster from correlogram import cross_correlogram from analog_signals import signals from peri_stimulus_histogram import psth from sde import sde from spike_waveforms import spikes from spike_amp_hist import spike_amplitude_histogram spykeutils-0.4.3/spykeutils/__pycache__/0000755000175000017500000000000012664623646016523 5ustar robrobspykeutils-0.4.3/spykeutils/conversions.py0000644000175000017500000001711212664623646017237 0ustar robrobimport scipy as sp import neo from . import SpykeException def spike_train_to_spikes(spike_train, include_waveforms=True): """ Return a list of spikes for a spike train. 
Note that while the created spikes have references to the same segment and unit as the spike train, the relationships in the other direction are not automatically created (the spikes are not attached to the unit or segment). Other properties like annotations are not copied or referenced in the created spikes. :param spike_train: A spike train from which the :class:`neo.core.Spike` objects are constructed. :type spike_train: :class:`neo.core.SpikeTrain` :param bool include_waveforms: Determines if the ``waveforms`` property is converted to the spike waveforms. If ``waveforms`` is None, this parameter has no effect. :returns: A list of :class:`neo.core.Spike` objects, one for every spike in ``spike_train``. :rtype: list """ waves = None if include_waveforms: waves = spike_train.waveforms spikes = [] for i, t in enumerate(spike_train): s = neo.Spike(t, sampling_rate=spike_train.sampling_rate, left_sweep=spike_train.left_sweep) if waves is not None: s.waveform = waves[i, :, :] s.unit = spike_train.unit s.segment = spike_train.segment spikes.append(s) return spikes def spikes_to_spike_train(spikes, include_waveforms=True): """ Return a spike train for a list of spikes. All spikes must have an identical left sweep, the same unit and the same segment, otherwise a ``SpykeException`` is raised. Note that while the created spike train has references to the same segment and unit as the spikes, the relationships in the other direction are not automatically created (the spike train is not attached to the unit or segment). Other properties like annotations are not copied or referenced in the created spike train. :param sequence spikes: A sequence of :class:`neo.core.Spike` objects from which the spike train is constructed. :param bool include_waveforms: Determines if the waveforms from the spike objects are used to fill the ``waveforms`` property of the resulting spike train. 
If ``True``, all spikes need a ``waveform`` property with the same shape or a ``SpykeException`` is raised (or the ``waveform`` property needs to be ``None`` for all spikes). :return: All elements of ``spikes`` as spike train. :rtype: :class:`neo.core.SpikeTrain` """ if not spikes: raise SpykeException('No spikes to create spike train!') tu = spikes[0].time.units times = sp.zeros(len(spikes)) * tu s = spikes[0].segment u = spikes[0].unit ls = spikes[0].left_sweep if include_waveforms and spikes[0].waveform is not None: sh = spikes[0].waveform.shape wu = spikes[0].waveform.units waves = sp.zeros((len(spikes), sh[0], sh[1])) * wu else: waves = None sh = None for i, spike in enumerate(spikes): if (u != spike.unit or s != spike.segment or ls != spike.left_sweep): raise SpykeException('Cannot create spike train from spikes with ' 'nonuniform properties!') times[i] = spikes[i].time if include_waveforms: if spike.waveform is None: if waves is not None: raise SpykeException('Cannot create spike train from ' 'spikes where some waveforms are ' 'None') elif sh != spike.waveform.shape: raise SpykeException('Cannot create spike train from spikes ' 'with nonuniform waveform shapes!') if waves is not None: waves[i, :, :] = spike.waveform ret = neo.SpikeTrain(times, t_start=times.min(), t_stop=times.max(), waveforms=waves, left_sweep=ls) ret.unit = u ret.segment = s ret.left_sweep = ls return ret def analog_signal_array_to_analog_signals(signal_array): """ Return a list of analog signals for an analog signal array. If ``signal_array`` is attached to a recording channel group with exactly is many channels as there are channels in ``signal_array``, each created signal will be assigned the corresponding channel. If the attached recording channel group has only one recording channel, all created signals will be assigned to this channel. In all other cases, the created signal will not have a reference to a recording channel. 
Note that while the created signals may have references to a segment and channels, the relationships in the other direction are not automatically created (the signals are not attached to the recording channel or segment). Other properties like annotations are not copied or referenced in the created analog signals. :param signal_array: An analog signal array from which the :class:`neo.core.AnalogSignal` objects are constructed. :type signal_array: :class:`neo.core.AnalogSignalArray` :return: A list of analog signals, one for every channel in ``signal_array``. :rtype: list """ signals = [] rcg = signal_array.recordingchannelgroup for i in xrange(signal_array.shape[1]): s = neo.AnalogSignal( signal_array[:, i], t_start=signal_array.t_start, sampling_rate=signal_array.sampling_rate) if len(rcg.recordingchannels) == 1: s.recordingchannel = rcg.recordingchannels[0] elif len(rcg.recordingchannels) == signal_array.shape[1]: s.recordingchannel = rcg.recordingchannels[i] s.segment = signal_array.segment signals.append(s) return signals def event_array_to_events(event_array): """ Return a list of events for an event array. Note that while the created events may have references to a segment, the relationships in the other direction are not automatically created (the events are not attached to the segment). Other properties like annotations are not copied or referenced in the created events. :param event_array: An event array from which the Event objects are constructed. :type event_array: :class:`neo.core.EventArray` :return: A list of events, one for of the events in ``event_array``. :rtype: list """ events = [] for i, t in enumerate(event_array.times): e = neo.Event( t, event_array.labels[i] if i < len(event_array.labels) else '') e.segment = event_array.segment events.append(e) return events def epoch_array_to_epochs(epoch_array): """ Return a list of epochs for an epoch array. 
Note that while the created epochs may have references to a segment, the relationships in the other direction are not automatically created (the events are not attached to the segment). Other properties like annotations are not copied or referenced in the created epochs. :param epoch_array: A period array from which the Epoch objects are constructed. :type epoch_array: :class:`neo.core.EpochArray` :return: A list of events, one for of the events in ``epoch_array``. :rtype: list """ periods = [] for i, t in enumerate(epoch_array.times): p = neo.Epoch( t, epoch_array.durations[i], epoch_array.labels[i] if i < len(epoch_array.labels) else '') p.segment = epoch_array.segment periods.append(p) return periods spykeutils-0.4.3/spykeutils/spike_train_generation.py0000644000175000017500000001134112664623646021410 0ustar robrobimport neo import numpy.random import quantities as pq import scipy as sp import _scipy_quantities as spq def gen_homogeneous_poisson( rate, t_start=0 * pq.s, t_stop=None, max_spikes=None, refractory=0 * pq.s): """ Generate a homogeneous Poisson spike train. The length is controlled with `t_stop` and `max_spikes`. Either one or both of these arguments have to be given. :param rate: Average firing rate of the spike train to generate as frequency scalar. :type rate: Quantity scalar :param t_start: Time at which the spike train begins as time scalar. The first actual spike will be greater than this time. :type t_start: Quantity scalar :param t_stop: Time at which the spike train ends as time scalar. All generated spikes will be lower or equal than this time. If set to None, the number of generated spikes is controlled by `max_spikes` and `t_stop` will be equal to the last generated spike. :type t_stop: Quantity scalar :param max_spikes: Maximum number of spikes to generate. Fewer spikes might be generated in case `t_stop` is also set. :param refractory: Absolute refractory period as time scalar. No spike will follow another spike for the given duration. 
Afterwards the firing rate will instantaneously be set to `rate` again. :type refractory: Quantity scalar :returns: The generated spike train. :rtype: :class:`neo.core.SpikeTrain` """ if t_stop is None and max_spikes is None: raise ValueError('Either t_stop or max_spikes has to be set.') if max_spikes is not None: spike_times = sp.cumsum(numpy.random.exponential( rate ** -1, max_spikes)) * (rate.units ** -1).simplified spike_times += t_start if refractory > 0: spike_times += sp.arange(spike_times.size) * refractory if t_stop is not None: spike_times = spike_times[spike_times <= t_stop] else: scale = (rate ** -1).rescale(t_stop.units) trains = [] last_spike = t_start.rescale(t_stop.units) while last_spike < t_stop: # Generate a bit more than the average number of expected spike to # be finished in most cases in one loop. The factor was determined # empirically. num_spikes = int(1.7 * ( (t_stop - last_spike) * rate).simplified) + 1 train = sp.cumsum(numpy.random.exponential(scale, num_spikes)) * \ scale.units + last_spike if refractory > 0: train += sp.arange(train.size) * refractory if train.size > 0: last_spike = train[-1] if last_spike >= t_stop: train = train[train < t_stop] trains.append(train) spike_times = spq.concatenate(trains) if t_stop is None: t_stop = spike_times[-1] return neo.SpikeTrain(spike_times, t_start=t_start, t_stop=t_stop) def gen_inhomogeneous_poisson( modulation, max_rate, t_start=0 * pq.s, t_stop=None, max_spikes=None, refractory=0 * pq.s): """ Generate an inhomogeneous Poisson spike train. The length is controlled with `t_stop` and `max_spikes`. Either one or both of these arguments have to be given. :param function modulation: Function :math:`f((t_1, \\dots, t_n)): [\\text{t\\_start}, \\text{t\\_end}]^n \\rightarrow [0, 1]^n` giving the instantaneous firing rates at times :math:`(t_1, \\dots, t_n)` as proportion of `max_rate`. Thus, a 1-D array will be passed to the function and it should return an array of the same size. 
:param max_rate: Maximum firing rate of the spike train to generate as frequency scalar. :type max_rate: Quantity scalar :param t_start: Time at which the spike train begins as time scalar. The first actual spike will be greater than this time. :type t_start: Quantity scalar :param t_stop: Time at which the spike train ends as time scalar. All generated spikes will be lower or equal than this time. If set to None, the number of generated spikes is controlled by `max_spikes` and `t_stop` will be equal to the last generated spike. :type t_stop: Quantity scalar :param refractory: Absolute refractory period as time scalar. No spike will follow another spike for the given duration. Afterwards the firing rate will instantaneously be set to `rate` again. :type refractory: Quantity scalar :returns: The generated spike train. :rtype: :class:`neo.core.SpikeTrain` """ st = gen_homogeneous_poisson( max_rate, t_start, t_stop, max_spikes, refractory) return st[numpy.random.rand(st.size) < modulation(st)] spykeutils-0.4.3/spykeutils/signal_processing.py0000644000175000017500000004510412664623646020402 0ustar robrobimport copy import quantities as pq import scipy as sp import scipy.signal import scipy.special import tools default_kernel_area_fraction = 0.99999 class Kernel(object): """ Base class for kernels. """ def __init__(self, kernel_size, normalize): """ :param kernel_size: Parameter controlling the kernel size. :type kernel_size: Quantity 1D :param bool normalize: Whether to normalize the kernel to unit area. """ self.kernel_size = kernel_size self.normalize = normalize def __call__(self, t, kernel_size=None): """ Evaluates the kernel at all time points in the array `t`. :param t: Time points to evaluate the kernel at. :type t: Quantity 1D :param kernel_size: If not `None` this overwrites the kernel size of the `Kernel` instance. :type kernel_size: Quantity scalar :returns: The result of the kernel evaluations. 
:rtype: Quantity 1D """ if kernel_size is None: kernel_size = self.kernel_size if self.normalize: normalization = self.normalization_factor(kernel_size) else: normalization = 1.0 * pq.dimensionless return self._evaluate(t, kernel_size) * normalization def _evaluate(self, t, kernel_size): """ Evaluates the kernel. :param t: Time points to evaluate the kernel at. :type t: Quantity 1D :param kernel_size: Controls the width of the kernel. :type kernel_size: Quantity scalar :returns: The result of the kernel evaluations. :rtype: Quantity 1D """ raise NotImplementedError() def normalization_factor(self, kernel_size): """ Returns the factor needed to normalize the kernel to unit area. :param kernel_size: Controls the width of the kernel. :type kernel_size: Quantity scalar :returns: Factor to normalize the kernel to unit width. :rtype: Quantity scalar """ raise NotImplementedError() def boundary_enclosing_at_least(self, fraction): """ Calculates the boundary :math:`b` so that the integral from :math:`-b` to :math:`b` encloses at least a certain fraction of the integral over the complete kernel. :param float fraction: Fraction of the whole area which at least has to be enclosed. :returns: boundary :rtype: Quantity scalar """ raise NotImplementedError() def is_symmetric(self): """ Should return `True` if the kernel is symmetric. """ return False def summed_dist_matrix(self, vectors, presorted=False): """ Calculates the sum of all element pair distances for each pair of vectors. If :math:`(a_1, \\dots, a_n)` and :math:`(b_1, \\dots, b_m)` are the :math:`u`-th and :math:`v`-th vector from `vectors` and :math:`K` the kernel, the resulting entry in the 2D array will be :math:`D_{uv} = \\sum_{i=1}^{n} \\sum_{j=1}^{m} K(a_i - b_j)`. :param sequence vectors: A sequence of Quantity 1D to calculate the summed distances for each pair. The required units depend on the kernel. Usually it will be the inverse unit of the kernel size. 
:param bool presorted: Some optimized specializations of this function may need sorted vectors. Set `presorted` to `True` if you know that the passed vectors are already sorted to skip the sorting and thus increase performance. :rtype: Quantity 2D """ D = sp.empty((len(vectors), len(vectors))) if len(vectors) > 0: might_have_units = self(vectors[0]) if hasattr(might_have_units, 'units'): D = D * might_have_units.units else: D = D * pq.dimensionless for i, j in sp.ndindex(len(vectors), len(vectors)): D[i, j] = sp.sum(self( (vectors[i] - sp.atleast_2d(vectors[j]).T).flatten())) return D class KernelFromFunction(Kernel): """ Creates a kernel form a function. Please note, that not all methods for such a kernel are implemented. """ def __init__(self, kernel_func, kernel_size): Kernel.__init__(self, kernel_size, normalize=False) self._evaluate = kernel_func def is_symmetric(self): return False def as_kernel_of_size(obj, kernel_size): """ Returns a kernel of desired size. :param obj: Either an existing kernel or a kernel function. A kernel function takes two arguments. First a `Quantity 1D` of evaluation time points and second a kernel size. :type obj: Kernel or func :param kernel_size: Desired size of the kernel. :type kernel_size: Quantity 1D :returns: A :class:`Kernel` with the desired kernel size. If `obj` is already a :class:`Kernel` instance, a shallow copy of this instance with changed kernel size will be returned. If `obj` is a function it will be wrapped in a :class:`Kernel` instance. :rtype: :class:`Kernel` """ if isinstance(obj, Kernel): obj = copy.copy(obj) obj.kernel_size = kernel_size else: obj = KernelFromFunction(obj, kernel_size) return obj class SymmetricKernel(Kernel): """ Base class for symmetric kernels. """ def __init__(self, kernel_size, normalize): """ :param kernel_size: Parameter controlling the kernel size. :type kernel_size: Quantity 1D :param bool normalize: Whether to normalize the kernel to unit area. 
""" Kernel.__init__(self, kernel_size, normalize) def is_symmetric(self): return True def summed_dist_matrix(self, vectors, presorted=False): D = sp.empty((len(vectors), len(vectors))) if len(vectors) > 0: might_have_units = self(vectors[0]) if hasattr(might_have_units, 'units'): D = D * might_have_units.units for i in xrange(len(vectors)): for j in xrange(i, len(vectors)): D[i, j] = D[j, i] = sp.sum(self( (vectors[i] - sp.atleast_2d(vectors[j]).T).flatten())) return D class CausalDecayingExpKernel(Kernel): r""" Unnormalized: :math:`K(t) = \exp(-\frac{t}{\tau}) \Theta(t)` with :math:`\Theta(t) = \left\{\begin{array}{ll}0, & x < 0\\ 1, & x \geq 0\end{array}\right.` and kernel size :math:`\tau`. Normalized to unit area: :math:`K'(t) = \frac{1}{\tau} K(t)` """ @staticmethod def evaluate(t, kernel_size): return sp.piecewise( t, [t < 0, t >= 0], [ lambda t: 0, lambda t: sp.exp( (-t * pq.dimensionless / kernel_size).simplified)]) def _evaluate(self, t, kernel_size): return self.evaluate(t, kernel_size) def normalization_factor(self, kernel_size): return 1.0 / kernel_size def __init__(self, kernel_size=1.0 * pq.s, normalize=True): Kernel.__init__(self, kernel_size, normalize) def boundary_enclosing_at_least(self, fraction): return -self.kernel_size * sp.log(1.0 - fraction) class GaussianKernel(SymmetricKernel): r""" Unnormalized: :math:`K(t) = \exp(-\frac{t^2}{2 \sigma^2})` with kernel size :math:`\sigma` (corresponds to the standard deviation of a Gaussian distribution). 
Normalized to unit area: :math:`K'(t) = \frac{1}{\sigma \sqrt{2 \pi}} K(t)` """ @staticmethod def evaluate(t, kernel_size): return sp.exp( -0.5 * (t * pq.dimensionless / kernel_size).simplified ** 2) def _evaluate(self, t, kernel_size): return self.evaluate(t, kernel_size) def normalization_factor(self, kernel_size): return 1.0 / (sp.sqrt(2.0 * sp.pi) * kernel_size) def __init__(self, kernel_size=1.0 * pq.s, normalize=True): Kernel.__init__(self, kernel_size, normalize) def boundary_enclosing_at_least(self, fraction): return self.kernel_size * sp.sqrt(2.0) * \ scipy.special.erfinv(fraction + scipy.special.erf(0.0)) class LaplacianKernel(SymmetricKernel): r""" Unnormalized: :math:`K(t) = \exp(-|\frac{t}{\tau}|)` with kernel size :math:`\tau`. Normalized to unit area: :math:`K'(t) = \frac{1}{2 \tau} K(t)` """ @staticmethod def evaluate(t, kernel_size): return sp.exp( -(sp.absolute(t) * pq.dimensionless / kernel_size).simplified) def _evaluate(self, t, kernel_size): return self.evaluate(t, kernel_size) def normalization_factor(self, kernel_size): return 0.5 / kernel_size def __init__(self, kernel_size=1.0 * pq.s, normalize=True): Kernel.__init__(self, kernel_size, normalize) def boundary_enclosing_at_least(self, fraction): return -self.kernel_size * sp.log(1.0 - fraction) def summed_dist_matrix(self, vectors, presorted=False): # This implementation is based on # # Houghton, C., & Kreuz, T. (2012). On the efficient calculation of van # Rossum distances. Network: Computation in Neural Systems, 23(1-2), # 48-58. # # Note that the cited paper contains some errors: In formula (9) the # left side of the equation should be divided by two and in the last # sum in this equation it should say `j|v_i >= u_i` instead of # `j|v_i > u_i`. Also, in equation (11) it should say `j|u_i >= v_i` # instead of `j|u_i > v_i`. # # Given N vectors with n entries on average the run-time complexity is # O(N^2 * n). O(N^2 + N * n) memory will be needed. 
if len(vectors) <= 0: return sp.zeros((0, 0)) if not presorted: vectors = [v.copy() for v in vectors] for v in vectors: v.sort() sizes = sp.asarray([v.size for v in vectors]) values = sp.empty((len(vectors), max(1, sizes.max()))) values.fill(sp.nan) for i, v in enumerate(vectors): if v.size > 0: values[i, :v.size] = \ (v / self.kernel_size * pq.dimensionless).simplified exp_diffs = sp.exp(values[:, :-1] - values[:, 1:]) markage = sp.zeros(values.shape) for u in xrange(len(vectors)): markage[u, 0] = 0 for i in xrange(sizes[u] - 1): markage[u, i + 1] = (markage[u, i] + 1.0) * exp_diffs[u, i] # Same vector terms D = sp.empty((len(vectors), len(vectors))) D[sp.diag_indices_from(D)] = sizes + 2.0 * sp.sum(markage, axis=1) # Cross vector terms for u in xrange(D.shape[0]): all_ks = sp.searchsorted(values[u], values, 'left') - 1 for v in xrange(u): js = sp.searchsorted(values[v], values[u], 'right') - 1 ks = all_ks[v] slice_j = sp.s_[sp.searchsorted(js, 0):sizes[u]] slice_k = sp.s_[sp.searchsorted(ks, 0):sizes[v]] D[u, v] = sp.sum( sp.exp(values[v][js[slice_j]] - values[u][slice_j]) * (1.0 + markage[v][js[slice_j]])) D[u, v] += sp.sum( sp.exp(values[u][ks[slice_k]] - values[v][slice_k]) * (1.0 + markage[u][ks[slice_k]])) D[v, u] = D[u, v] if self.normalize: normalization = self.normalization_factor(self.kernel_size) else: normalization = 1.0 return normalization * D class RectangularKernel(SymmetricKernel): r""" Unnormalized: :math:`K(t) = \left\{\begin{array}{ll}1, & |t| < \tau \\ 0, & |t| \geq \tau\end{array} \right.` with kernel size :math:`\tau` corresponding to the half width. 
Normalized to unit area: :math:`K'(t) = \frac{1}{2 \tau} K(t)` """ @staticmethod def evaluate(t, half_width): return sp.absolute(t) < half_width def _evaluate(self, t, kernel_size): return self.evaluate(t, kernel_size) def normalization_factor(self, half_width): return 0.5 / half_width def __init__(self, half_width=1.0 * pq.s, normalize=True): Kernel.__init__(self, half_width, normalize) def boundary_enclosing_at_least(self, fraction): return self.kernel_size class TriangularKernel(SymmetricKernel): r""" Unnormalized: :math:`K(t) = \left\{ \begin{array}{ll}1 - \frac{|t|}{\tau}, & |t| < \tau \\ 0, & |t| \geq \tau \end{array} \right.` with kernel size :math:`\tau` corresponding to the half width. Normalized to unit area: :math:`K'(t) = \frac{1}{\tau} K(t)` """ @staticmethod def evaluate(t, half_width): return sp.maximum( 0.0, (1.0 - sp.absolute(t.rescale(half_width.units)) * pq.dimensionless / half_width).magnitude) def _evaluate(self, t, kernel_size): return self.evaluate(t, kernel_size) def normalization_factor(self, half_width): return 1.0 / half_width def __init__(self, half_width=1.0 * pq.s, normalize=True): Kernel.__init__(self, half_width, normalize) def boundary_enclosing_at_least(self, fraction): return self.kernel_size def discretize_kernel( kernel, sampling_rate, area_fraction=default_kernel_area_fraction, num_bins=None, ensure_unit_area=False): """ Discretizes a kernel. :param kernel: The kernel or kernel function. If a kernel function is used it should take exactly one 1-D array as argument. :type kernel: :class:`Kernel` or function :param float area_fraction: Fraction between 0 and 1 (exclusive) of the integral of the kernel which will be at least covered by the discretization. Will be ignored if `num_bins` is not `None`. If `area_fraction` is used, the kernel has to provide a method :meth:`boundary_enclosing_at_least` (see :meth:`.Kernel.boundary_enclosing_at_least`). :param sampling_rate: Sampling rate for the discretization. 
The unit will typically be a frequency unit. :type sampling_rate: Quantity scalar :param int num_bins: Number of bins to use for the discretization. :param bool ensure_unit_area: If `True`, the area of the discretized kernel will be normalized to 1.0. :rtype: Quantity 1D """ t_step = 1.0 / sampling_rate if num_bins is not None: start = -num_bins // 2 stop = num_bins // 2 elif area_fraction is not None: boundary = kernel.boundary_enclosing_at_least(area_fraction) if hasattr(boundary, 'rescale'): boundary = boundary.rescale(t_step.units) start = sp.ceil(-boundary / t_step) stop = sp.floor(boundary / t_step) + 1 else: raise ValueError( "One of area_fraction and num_bins must not be None.") k = kernel(sp.arange(start, stop) * t_step) if ensure_unit_area: k /= sp.sum(k) * t_step return k def smooth( binned, kernel, sampling_rate, mode='same', **kernel_discretization_params): """ Smoothes a binned representation (e.g. of a spike train) by convolving with a kernel. :param binned: Bin array to smooth. :type binned: 1-D array :param kernel: The kernel instance to convolve with. :type kernel: :class:`Kernel` :param sampling_rate: The sampling rate which will be used to discretize the kernel. It should be equal to the sampling rate used to obtain `binned`. The unit will typically be a frequency unit. :type sampling_rate: Quantity scalar :param mode: * 'same': The default which returns an array of the same size as `binned` * 'full': Returns an array with a bin for each shift where `binned` and the discretized kernel overlap by at least one bin. * 'valid': Returns only the discretization bins where the discretized kernel and `binned` completely overlap. See also `numpy.convolve `_. :type mode: {'same', 'full', 'valid'} :param dict kernel_discretization_params: Additional discretization arguments which will be passed to :func:`.discretize_kernel`. :returns: The smoothed representation of `binned`. 
:rtype: Quantity 1D """ k = discretize_kernel( kernel, sampling_rate=sampling_rate, **kernel_discretization_params) return scipy.signal.convolve(binned, k, mode) * k.units def st_convolve( train, kernel, sampling_rate, mode='same', binning_params=None, kernel_discretization_params=None): """ Convolves a :class:`neo.core.SpikeTrain` with a kernel. :param train: Spike train to convolve. :type train: :class:`neo.core.SpikeTrain` :param kernel: The kernel instance to convolve with. :type kernel: :class:`Kernel` :param sampling_rate: The sampling rate which will be used to bin the spike train. The unit will typically be a frequency unit. :type sampling_rate: Quantity scalar :param mode: * 'same': The default which returns an array covering the whole duration of the spike train `train`. * 'full': Returns an array with additional discretization bins in the beginning and end so that for each spike the whole discretized kernel is included. * 'valid': Returns only the discretization bins where the discretized kernel and spike train completely overlap. See also :func:`scipy.signal.convolve`. :type mode: {'same', 'full', 'valid'} :param dict binning_params: Additional discretization arguments which will be passed to :func:`.tools.bin_spike_trains`. :param dict kernel_discretization_params: Additional discretization arguments which will be passed to :func:`.discretize_kernel`. 
:returns: The convolved spike train, the boundaries of the discretization bins :rtype: (Quantity 1D, Quantity 1D with the inverse units of `sampling_rate`) """ if binning_params is None: binning_params = {} if kernel_discretization_params is None: kernel_discretization_params = {} binned, bins = tools.bin_spike_trains( {0: [train]}, sampling_rate, **binning_params) binned = binned[0][0] #sampling_rate = binned.size / (bins[-1] - bins[0]) result = smooth( binned, kernel, sampling_rate, mode, **kernel_discretization_params) assert (result.size - binned.size) % 2 == 0 num_additional_bins = (result.size - binned.size) // 2 if len(binned): bins = sp.linspace( bins[0] - num_additional_bins / sampling_rate, bins[-1] + num_additional_bins / sampling_rate, result.size + 1) else: bins = [] * pq.s return result, bins spykeutils-0.4.3/spykeutils/_scipy_quantities.py0000644000175000017500000000713312664623646020425 0ustar robrobimport scipy as sp import quantities as pq # At least up to quantities 0.10.1 the scipy element-wise minimum and maximum # function did not work. # This has been reported upstream as issue #53: # def _fix_binary_scipy_function_with_out_param(f): def _fixed(x1, x2, out=None): if isinstance(x1, pq.Quantity) or isinstance(x2, pq.Quantity): x1 = x1 * pq.dimensionless x2 = x2 * pq.dimensionless if out is None: x2 = x2.rescale(x1.units) return f(x1.magnitude, x2.magnitude) * x1.units else: x1 = x1.rescale(out.units) x2 = x2.rescale(out.units) f(x1.magnitude, x2.magnitude, out.magnitude) return out else: return f(x1, x2, out) return _fixed minimum = _fix_binary_scipy_function_with_out_param(sp.minimum) maximum = _fix_binary_scipy_function_with_out_param(sp.maximum) # At least up to quantities 0.10.1 the scipy meshgrid and concatenate # functions did lose units. 
# This has been reported upstream as issue #47: # # Fixed with scipy 0.17 def _fix_scipy_meshgrid(f): def _fixed(x, y): rx, ry = f(x, y) if isinstance(x, pq.Quantity) and not isinstance(rx, pq.Quantity): rx = rx * x.units if isinstance(y, pq.Quantity) and not isinstance(ry, pq.Quantity): ry = ry * y.units return rx, ry return _fixed if sp.__version__ < '0.17': meshgrid = _fix_scipy_meshgrid(sp.meshgrid) else: meshgrid = sp.meshgrid def _fix_scipy_concatenate(f): def _fixed(arrays, axis=0): is_quantity = len(arrays) > 0 and isinstance(arrays[0], pq.Quantity) if is_quantity: arrays = [(a * pq.dimensionless).rescale(arrays[0].units) for a in arrays] else: for a in arrays: if (isinstance(a, pq.Quantity) and a.units.simplified != pq.dimensionless): raise ValueError( 'Cannot concatenate arrays of different units') concatenated = f(arrays, axis=axis) if is_quantity: concatenated = concatenated * arrays[0].units return concatenated return _fixed concatenate = _fix_scipy_concatenate(sp.concatenate) # At least up to quantities 0.10.1 the scipy inner and diag functions did not # respect units. 
def _fix_binary_scipy_function(f):
    """ Wrap a binary scipy function so that it respects units.

    If either operand is a Quantity, both are promoted to Quantities,
    the second is rescaled to the units of the first, and the result is
    returned in the units of the first operand.
    """
    def _fixed(x1, x2):
        if isinstance(x1, pq.Quantity) or isinstance(x2, pq.Quantity):
            x1 = x1 * pq.dimensionless
            x2 = x2 * pq.dimensionless
            x2 = x2.rescale(x1.units)
            return f(x1.magnitude, x2.magnitude) * x1.units
        else:
            return f(x1, x2)
    return _fixed

inner = _fix_binary_scipy_function(sp.inner)


# diag loses units
# Fixed with scipy 0.17
def diag(v, k=0):
    """ Unit-preserving replacement for ``scipy.diag``. """
    if isinstance(v, pq.Quantity):
        r = sp.diag(v, k)
        # Older scipy returns a plain ndarray here; reattach the units.
        return r if isinstance(r, pq.Quantity) else r * v.units
    else:
        return sp.diag(v, k)


# linspace loses unit for only one bin, see bug report
#
# Fixed with scipy 0.17
def linspace(start, stop, num=50, endpoint=True, retstep=False):
    """ Unit-preserving replacement for ``scipy.linspace`` (works around
    the unit being dropped when only a single point is requested). """
    if int(num) == 1 and isinstance(start, pq.Quantity):
        r = sp.linspace(start, stop, num, endpoint, retstep)
        return r if isinstance(r, pq.Quantity) else r * start.units
    else:
        return sp.linspace(start, stop, num, endpoint, retstep)


# --- spykeutils/correlations.py ---
import scipy as sp
from collections import OrderedDict
import quantities as pq
from progress_indicator import ProgressIndicator
from . import SpykeException


def correlogram(trains, bin_size, max_lag=500 * pq.ms, border_correction=True,
                per_second=True, unit=pq.ms, progress=None):
    """ Return (cross-)correlograms from a dictionary of spike train
    lists for different units.

    :param dict trains: Dictionary of :class:`neo.core.SpikeTrain` lists.
    :param bin_size: Bin size (time).
    :type bin_size: Quantity scalar
    :param max_lag: Cut off (end time of calculated correlogram).
    :type max_lag: Quantity scalar
    :param bool border_correction: Apply correction for less data at
        higher timelags. Not perfect for bin_size != 1*``unit``,
        especially with large ``max_lag`` compared to length of spike
        trains.
    :param bool per_second: If ``True``, counts returned are per second.
        Otherwise, counts per spike train are returned.
    :param Quantity unit: Unit of X-Axis.
    :param progress: A ProgressIndicator object for the operation.
    :type progress: :class:`.progress_indicator.ProgressIndicator`
    :returns: Two values:

        * An ordered dictionary indexed with the indices of ``trains`` of
          ordered dictionaries indexed with the same indices. Entries of
          the inner dictionaries are the resulting (cross-)correlograms as
          numpy arrays. All crosscorrelograms can be indexed in two
          different ways: ``c[index1][index2]`` and ``c[index2][index1]``.
        * The bins used for the correlogram calculation.
    :rtype: dict, Quantity 1D
    """
    if not progress:
        progress = ProgressIndicator()

    # Bug fix: Quantity.rescale returns a rescaled *copy*; the original
    # discarded the return value, so bin_size and max_lag silently kept
    # their incoming units.
    bin_size = bin_size.rescale(unit)
    max_lag = max_lag.rescale(unit)

    # Create bins, making sure that 0 is at the center of central bin
    half_bins = sp.arange(bin_size / 2, max_lag, bin_size)
    all_bins = list(reversed(-half_bins))
    all_bins.extend(half_bins)
    bins = sp.array(all_bins) * unit
    middle_bin = len(bins) / 2 - 1

    indices = trains.keys()
    num_trains = len(trains[indices[0]])
    if not num_trains:
        raise SpykeException('Could not create correlogram: No spike trains!')
    for u in range(1, len(indices)):
        if len(trains[indices[u]]) != num_trains:
            raise SpykeException('Could not create correlogram: All units ' +
                                 'need the same number of spike trains!')

    # One tick per (unit pair, train) combination.
    progress.set_ticks(sp.sum(range(len(trains) + 1) * num_trains))

    corrector = 1
    if border_correction:
        # Need safe min/max functions
        def safe_max(seq):
            if len(seq) < 1:
                return 0
            return max(seq)

        def safe_min(seq):
            if len(seq) < 1:
                return 2 ** 22  # Some arbitrary large value
            return min(seq)

        max_w = max([max([safe_max(t) for t in l])
                     for l in trains.itervalues()])
        min_w = min([min([safe_min(t) for t in l])
                     for l in trains.itervalues()])

        train_length = (max_w - min_w)
        l = int(round(middle_bin)) + 1
        cE = max(train_length - (l * bin_size) + 1 * unit, 1 * unit)

        # Weight per bin: inverse of the fraction of data available at
        # the bin's time lag (triangular correction).
        corrector = (train_length / sp.concatenate(
            (sp.linspace(cE, train_length, l - 1, False),
             sp.linspace(train_length, cE, l)))).magnitude

    correlograms = OrderedDict()
    for i1 in xrange(len(indices)):  # For each index
        # For all later indices, including itself
        for i2 in xrange(i1, len(indices)):
            histogram = sp.zeros(len(bins) - 1)
            for t in xrange(num_trains):
                train1 = trains[indices[i1]][t].rescale(unit).reshape((1, -1))
                train2 = trains[indices[i2]][t].rescale(unit).reshape((-1, 1))
                histogram += sp.histogram(
                    sp.subtract(train1, train2), bins=bins)[0]
                if i1 == i2:  # Correction for autocorrelogram
                    histogram[middle_bin] -= len(train2)
                progress.step()

            if per_second:
                duration = train1.t_stop - train1.t_start
                if train2.t_stop - train2.t_start != duration:
                    raise SpykeException(
                        'A spike train pair does not have equal length, '
                        'cannot calculate count per second.')
                histogram /= duration.rescale(pq.s)

            crg = corrector * histogram / num_trains
            if indices[i1] not in correlograms:
                correlograms[indices[i1]] = OrderedDict()
            correlograms[indices[i1]][indices[i2]] = crg
            if i1 != i2:
                # The mirrored correlogram is just the reversed array.
                if indices[i2] not in correlograms:
                    correlograms[indices[i2]] = OrderedDict()
                correlograms[indices[i2]][indices[i1]] = crg[::-1]

    return correlograms, bins


# --- spykeutils/stationarity.py ---
import scipy as sp
import quantities as pq
from progress_indicator import ProgressIndicator
from . import SpykeException
Otherwise, the minimum bin range is computed separately for each channel. :param Quantity unit: Unit of Y-Axis. :param progress: Set this parameter to report progress. :type progress: :class:`.progress_indicator.ProgressIndicator` :return: A tuple with three values: * A three-dimensional histogram matrix, where the first dimension corresponds to bins, the second dimension to the entries of ``trains`` (e.g. segments) and the third dimension to channels. * A list of the minimum amplitude value for each channel (all values will be equal if ``uniform_y_scale`` is true). * A list of the maximum amplitude value for each channel (all values will be equal if ``uniform_y_scale`` is true). :rtype: (ndarray, list, list) """ if not progress: progress = ProgressIndicator() num_channels = 1 for t in trains: if not t: continue num_channels = t[0].waveforms.shape[2] break progress.set_ticks(2*len(trains)) progress.set_status('Calculating Spike Amplitude Histogram') # Find maximum and minimum amplitudes on all channels up = [0] * num_channels down = [0] * num_channels for t in trains: for s in t: if s.waveforms is None: continue if s.waveforms.shape[2] != num_channels: raise SpykeException('All spikes need to have the same ' + 'numer of channels for Spike Amplitude Histogram!') a = sp.asarray(s.waveforms.rescale(unit)) u = a.max(1) d = a.min(1) for c in xrange(num_channels): up[c] = max(up[c], sp.stats.mstats.mquantiles( u[:,c], [0.999])[0]) down[c] = min(down[c], sp.stats.mstats.mquantiles( d[:,c], [0.001])[0]) progress.step() if uniform_y_scale: up = [max(up)] * num_channels down = [min(down)] * num_channels # Create histogram bins = [sp.linspace(down[c],up[c], num_bins+1) for c in xrange(num_channels)] hist = sp.zeros((num_bins, len(trains), num_channels)) for i, t in enumerate(trains): for s in t: if s.waveforms is None: continue a = sp.asarray(s.waveforms.rescale(unit)) upper = a.max(1) lower = a.min(1) for c in xrange(num_channels): hist[:,i,c] += sp.histogram(upper[:,c], 
bins[c])[0] hist[:,i,c] += sp.histogram(lower[:,c], bins[c])[0] progress.step() return hist, down, up spykeutils-0.4.3/spykeutils/sorting_quality_assesment.py0000644000175000017500000004017212664623646022210 0ustar robrob""" Functions for estimating the quality of spike sorting results. These functions estimate false positive and false negative fractions. """ from __future__ import division import scipy as sp from scipy.spatial.distance import cdist import quantities as pq import neo from progress_indicator import ProgressIndicator from . import SpykeException from conversions import spikes_to_spike_train def get_refperiod_violations(spike_trains, refperiod, progress=None): """ Return the refractory period violations in the given spike trains for the specified refractory period. :param dict spike_trains: Dictionary of lists of :class:`neo.core.SpikeTrain` objects. :param refperiod: The refractory period (time). :type refperiod: Quantity scalar :param progress: Set this parameter to report progress. :type progress: :class:`.progress_indicator.ProgressIndicator` :returns: Two values: * The total number of violations. * A dictionary (with the same indices as ``spike_trains``) of arrays with violation times (Quantity 1D with the same unit as ``refperiod``) for each spike train. 
:rtype: int, dict """ if type(refperiod) != pq.Quantity or \ refperiod.simplified.dimensionality != pq.s.dimensionality: raise ValueError('refperiod must be a time quantity!') if not progress: progress = ProgressIndicator() total_violations = 0 violations = {} for u, tL in spike_trains.iteritems(): violations[u] = [] for i, t in enumerate(tL): st = t.copy() st.sort() isi = sp.diff(st) violations[u].append(st[isi < refperiod].rescale(refperiod.units)) total_violations += len(violations[u][i]) progress.step() return total_violations, violations def calculate_refperiod_fp(num_spikes, refperiod, violations, total_time): """ Return the rate of false positives calculated from refractory period calculations for each unit. The equation used is described in (Hill et al. The Journal of Neuroscience. 2011). :param dict num_spikes: Dictionary of total number of spikes, indexed by unit. :param refperiod: The refractory period (time). If the spike sorting algorithm includes a censored period (a time after a spike during which no new spikes can be found), subtract it from the refractory period before passing it to this function. :type refperiod: Quantity scalar :param dict violations: Dictionary of total number of violations, indexed the same as num_spikes. :param total_time: The total time in which violations could have occured. :type total_time: Quantity scalar :returns: A dictionary of false positive rates indexed by unit. Note that values above 0.5 can not be directly interpreted as a false positive rate! These very high values can e.g. indicate that the generating processes are not independent. 
""" if type(refperiod) != pq.Quantity or \ refperiod.simplified.dimensionality != pq.s.dimensionality: raise ValueError('refperiod must be a time quantity!') fp = {} factor = total_time / (2 * refperiod) for u, n in num_spikes.iteritems(): if n == 0: fp[u] = 0 continue zw = (violations[u] * factor / n ** 2).simplified if zw > 0.25: fp[u] = 0.5 + sp.sqrt(0.25 - zw).imag continue fp[u] = 0.5 - sp.sqrt(0.25 - zw) return fp def _multi_norm(x, mean): """ Evaluate pdf of multivariate normal distribution with a mean at rows of x with high precision. """ d = x.shape[1] fac = (2 * sp.pi) ** (-d / 2.0) y = cdist(x, sp.atleast_2d(mean), 'sqeuclidean') * -0.5 return fac * sp.exp(sp.longdouble(y)) def _fast_overlap_whitened(spike_arrays, means): units = spike_arrays.keys() spikes = {u: spike_arrays[u].shape[1] for u in spike_arrays.iterkeys()} prior = {} total_spikes = 0 for u, mean in means.iteritems(): total_spikes += spikes[u] if total_spikes < 1: return {u: (0.0, 0.0) for u in units}, {} # Arrays of unnormalized posteriors (likelihood times prior) # for all units posterior = {} false_positive = {} false_negative = {} for u in units: prior[u] = spikes[u] / total_spikes false_positive[u] = 0 false_negative[u] = 0 # Calculate posteriors for u1 in units[:]: if not spikes[u1]: units.remove(u1) continue posterior[u1] = {} for u2, mean in means.iteritems(): llh = _multi_norm(spike_arrays[u1].T, mean) posterior[u1][u2] = llh * prior[u2] # Calculate pairwise false positives/negatives singles = {u: {} for u in units} for i, u1 in enumerate(units): u1 = units[i] for u2 in units[i + 1:]: f1 = sp.sum(posterior[u1][u2] / (posterior[u1][u1] + posterior[u1][u2]), dtype=sp.double) f2 = sp.sum(posterior[u2][u1] / (posterior[u2][u1] + posterior[u2][u2]), dtype=sp.double) singles[u1][u2] = (f1 / spikes[u1] if spikes[u1] else 0, f2 / spikes[u1] if spikes[u1] else 0) singles[u2][u1] = (f2 / spikes[u2] if spikes[u2] else 0, f1 / spikes[u2] if spikes[u2] else 0) # Calculate complete false 
def _fast_overlap_whitened(spike_arrays, means):
    """ Estimate total and pairwise FP/FN rates for prewhitened clusters
    (identity covariance), using unnormalized Gaussian posteriors.
    Returns the same (totals, singles) pair as :func:`overlap_fp_fn`.
    """
    units = spike_arrays.keys()
    spikes = {u: spike_arrays[u].shape[1] for u in spike_arrays.iterkeys()}

    prior = {}
    total_spikes = 0
    for u, mean in means.iteritems():
        total_spikes += spikes[u]
    if total_spikes < 1:
        return {u: (0.0, 0.0) for u in units}, {}

    # Unnormalized posteriors (likelihood times prior) for all units,
    # plus per-unit accumulators for the total error estimates.
    posterior = {}
    false_positive = {}
    false_negative = {}
    for u in units:
        prior[u] = spikes[u] / total_spikes
        false_positive[u] = 0
        false_negative[u] = 0

    # Evaluate each unit's spikes under every unit's Gaussian. Units
    # without spikes are dropped from further consideration.
    for u1 in units[:]:
        if not spikes[u1]:
            units.remove(u1)
            continue
        posterior[u1] = {}
        for u2, mean in means.iteritems():
            likelihood = _multi_norm(spike_arrays[u1].T, mean)
            posterior[u1][u2] = likelihood * prior[u2]

    # Pairwise false positives/negatives from two-cluster posteriors.
    singles = {u: {} for u in units}
    for i, u1 in enumerate(units):
        for u2 in units[i + 1:]:
            f1 = sp.sum(posterior[u1][u2] /
                        (posterior[u1][u1] + posterior[u1][u2]),
                        dtype=sp.double)
            f2 = sp.sum(posterior[u2][u1] /
                        (posterior[u2][u1] + posterior[u2][u2]),
                        dtype=sp.double)

            singles[u1][u2] = (f1 / spikes[u1] if spikes[u1] else 0,
                               f2 / spikes[u1] if spikes[u1] else 0)
            singles[u2][u1] = (f2 / spikes[u2] if spikes[u2] else 0,
                               f1 / spikes[u2] if spikes[u2] else 0)

    # Total false positives/negatives using all clusters at once
    # (extended Bayes over the full mixture).
    for u1 in units:
        numerator = posterior[u1][u1]
        normalizer = sum(posterior[u1][u2] for u2 in units)
        false_positive[u1] = sp.sum((normalizer - numerator) / normalizer)

        other_units = units[:]
        other_units.remove(u1)
        numerator = sp.vstack((posterior[u][u1] for u in other_units))
        normalizer = sp.vstack(sum(posterior[u][u2] for u2 in units)
                               for u in other_units)
        false_negative[u1] = sp.sum(numerator / normalizer)

    # Convert the accumulated sums into per-spike means.
    totals = {}
    for u, fp in false_positive.iteritems():
        fn = false_negative[u]
        if not spikes[u]:
            totals[u] = (0, 0)
        else:
            num = spikes[u]
            totals[u] = (fp / num, fn / num)
    return totals, singles


def _pair_overlap(waves1, waves2, mean1, mean2, cov1, cov2):
    """ Calculate FP/FN estimates for two gaussian clusters. """
    from sklearn import mixture

    cluster_means = sp.vstack([[mean1], [mean2]])
    cluster_covars = sp.vstack([[cov1], [cov2]])
    cluster_weights = sp.array([waves1.shape[1], waves2.shape[1]],
                               dtype=float)
    cluster_weights /= cluster_weights.sum()

    # Create mixture of two Gaussians from the existing estimates;
    # init_params='' prevents refitting of the supplied parameters.
    mix = mixture.GMM(n_components=2, covariance_type='full',
                      init_params='')
    mix.covars_ = cluster_covars
    mix.weights_ = cluster_weights
    mix.means_ = cluster_means

    posterior1 = mix.predict_proba(waves1.T)[:, 1]
    posterior2 = mix.predict_proba(waves2.T)[:, 0]

    return (posterior1.mean(), posterior2.sum() / len(posterior1),
            posterior2.mean(), posterior1.sum() / len(posterior2))


def _object_has_size(obj, size):
    """ Return if the object, which could be either a neo.Spike or
    ndarray, has the given size.
    """
    if isinstance(obj, neo.Spike):
        return obj.waveform.size == size
    return obj.size == size


def overlap_fp_fn(spikes, means=None, covariances=None):
    """ Return dicts of tuples (false positive rate, false negative rate)
    indexed by unit. This function needs :mod:`sklearn` if
    ``covariances`` is not set to ``'white'``.

    False positives are the fraction of spikes in a cluster that is
    estimated to belong to a different cluster; false negatives are the
    number of spikes from other clusters estimated to belong to a given
    cluster (as a fraction, possibly above 1 in extreme cases). Details
    in (Hill et al. The Journal of Neuroscience. 2011); total error
    probabilities here are estimated from all clusters at once instead of
    summing pairwise probabilities.

    :param dict spikes: Dictionary, indexed by unit, of lists of spike
        waveforms as :class:`neo.core.Spike` objects or numpy arrays.
        Multi-channel waveforms are flattened; all waveforms need the
        same number of samples.
    :param dict means: Dictionary, indexed by unit, of mean waveforms as
        :class:`neo.core.Spike` objects or numpy arrays. Means for units
        not in this dictionary are estimated from the spikes. If
        ``covariances`` is ``'white'``, the means have to be whitened in
        the same way as the spikes. Default: None.
    :param covariances: Dictionary, indexed by unit, of covariance
        matrices; missing entries are estimated from the spikes. Pass
        ``'white'`` for prewhitened spikes (identity covariance) - much
        faster and without the sklearn dependency. Default: None.
    :type covariances: dict or str
    :returns: Two values:

        * A dictionary (indexed by unit) of total
          (false positive rate, false negative rate) tuples.
        * A dictionary of dictionaries, both indexed by units, of
          pairwise (false positive rate, false negative rate) tuples.
    :rtype: dict, dict
    """
    units = spikes.keys()

    total_spikes = 0
    for spks in spikes.itervalues():
        total_spikes += len(spks)
    if total_spikes < 1:
        return {u: (0.0, 0.0) for u in units}, {}

    if means is None:
        means = {}
    white = False
    if covariances is None:
        covariances = {}
    elif covariances == 'white':
        white = True
        covariances = {}

    # Convert Spike objects to arrays; drop units with too little data.
    dimensionality = None
    spike_arrays = {}
    for u, spks in spikes.iteritems():
        if not spks or (len(spks) < 2 and u not in covariances):
            units.remove(u)
            continue
        waveform_list = []
        for s in spks:
            if isinstance(s, neo.Spike):
                waveform_list.append(
                    sp.asarray(s.waveform.rescale(pq.uV)).T.flatten())
            else:
                waveform_list.append(s)
        spike_arrays[u] = sp.array(waveform_list).T
        if dimensionality is None:
            dimensionality = spike_arrays[u].shape[0]
        elif dimensionality != spike_arrays[u].shape[0]:
            raise SpykeException('All spikes need to have the same number'
                                 'of samples!')

    if not units:
        return {}, {}
    if len(units) == 1:
        return {units[0]: (0.0, 0.0)}, {}

    # Convert or calculate means (and covariances, unless whitened).
    shaped_means = {}
    covs = {}
    if white:
        cov = sp.eye(dimensionality)
        covariances = {u: cov for u in units}
    for u in units:
        if u in means and _object_has_size(means[u], dimensionality):
            mean = means[u]
            if isinstance(mean, neo.Spike):
                shaped_means[u] = sp.asarray(
                    mean.waveform.rescale(pq.uV)).T.flatten()
            else:
                shaped_means[u] = means[u].T.flatten()
        else:
            shaped_means[u] = spike_arrays[u].mean(axis=1)

    if white:
        return _fast_overlap_whitened(spike_arrays, shaped_means)

    for u in units:
        if u not in covariances:
            covs[u] = sp.cov(spike_arrays[u])
        else:
            covs[u] = covariances[u]

    # Pairwise false positives/negatives from two-cluster mixtures.
    singles = {u: {} for u in units}
    for i, u1 in enumerate(units):
        for u2 in units[i + 1:]:
            error_rates = _pair_overlap(
                spike_arrays[u1], spike_arrays[u2],
                shaped_means[u1], shaped_means[u2],
                covs[u1], covs[u2])
            singles[u1][u2] = error_rates[0:2]
            singles[u2][u1] = error_rates[2:4]

    # Total false positives/negatives from a mixture over all clusters.
    # NOTE(review): n_components=2 is kept from the original although the
    # parameters below describe len(units) components - confirm against
    # the sklearn version in use.
    import sklearn
    mix = sklearn.mixture.GMM(n_components=2, covariance_type='full')
    mix_means = []
    mix_covars = []
    mix_weights = []
    for u in units:
        mix_means.append(shaped_means[u])
        mix_covars.append([covs[u]])
        mix_weights.append(spike_arrays[u].shape[1])
    mix.means_ = sp.vstack(mix_means)
    mix.covars_ = sp.vstack(mix_covars)
    mix_weights = sp.array(mix_weights, dtype=float)
    mix_weights /= mix_weights.sum()
    mix.weights_ = mix_weights

    # P(spikes of unit[i] in correct cluster)
    post_mean = sp.zeros(len(units))
    # sum(P(spikes of unit[i] in cluster[j])
    post_sum = sp.zeros((len(units), len(units)))
    for i, u in enumerate(units):
        posterior = mix.predict_proba(spike_arrays[u].T)
        post_mean[i] = posterior[:, i].mean()
        post_sum[i, :] = posterior.sum(axis=0)

    totals = {}
    for i, u in enumerate(units):
        fp = 1.0 - post_mean[i]
        ind = range(len(units))
        ind.remove(i)
        fn = post_sum[ind, i].sum() / float(spike_arrays[u].shape[1])
        totals[u] = (fp, fn)

    return totals, singles
def variance_explained(spikes, means=None, noise=None):
    """ Returns the fraction of variance in each channel that is
    explained by the means.

    Values below 0 or above 1 for large data sizes indicate that some
    assumptions were incorrect (e.g. about channel noise) and the
    results should not be trusted.

    :param dict spikes: Dictionary, indexed by unit, of
        :class:`neo.core.SpikeTrain` objects (where the ``waveforms``
        member includes the spike waveforms) or lists of
        :class:`neo.core.Spike` objects.
    :param dict means: Dictionary, indexed by unit, of mean waveforms as
        :class:`neo.core.Spike` objects or numpy arrays. Means for units
        that are not in this dictionary will be estimated using the
        spikes. Default: None - means will be estimated from given
        spikes.
    :type noise: Quantity 1D
    :param noise: The known noise levels (as variance) per channel of
        the original data. This should be estimated from the signal
        periods that do not contain spikes, otherwise the explained
        variance could be overestimated. If None, the estimate of
        explained variance is done without regard for noise.
        Default: None
    :return dict: A dictionary of arrays, both indexed by unit. If
        ``noise`` is ``None``, the dictionary contains the fraction of
        explained variance per channel without taking noise into
        account. If ``noise`` is given, it contains the fraction of
        variance per channel explained by the means and given noise
        level together.
    """
    if means is None:
        means = {}

    ret = {}
    for u, spks in spikes.iteritems():
        # Normalize the input to a single SpikeTrain with waveforms.
        train = spks
        if not isinstance(train, neo.SpikeTrain):
            train = spikes_to_spike_train(spks)

        # Use the supplied template if its sample count matches,
        # otherwise estimate the template as the waveform average.
        if u in means and \
                means[u].waveform.shape[0] == train.waveforms.shape[1]:
            template = means[u]
        else:
            template = neo.Spike(0)
            template.waveform = sp.mean(train.waveforms, axis=0)

        # Variance before and after subtracting the template.
        total_var = sp.mean(sp.var(train.waveforms, axis=1), axis=0)
        residuals = train.waveforms - template.waveform
        residual_var = sp.mean(sp.var(residuals, axis=1), axis=0)

        if noise is not None:
            ret[u] = sp.asarray(1 - (residual_var - noise) / total_var)
        else:
            ret[u] = sp.asarray(1 - residual_var / total_var)

    return ret


# --- spykeutils/__init__.py ---
"""
.. autoclass:: spykeutils.SpykeException

:mod:`conversions` Module
-------------------------

.. automodule:: spykeutils.conversions
    :members:
    :undoc-members:
    :show-inheritance:

:mod:`correlations` Module
--------------------------

.. automodule:: spykeutils.correlations
    :members:

:mod:`progress_indicator` Module
--------------------------------

.. automodule:: spykeutils.progress_indicator
    :members:
    :undoc-members:
    :show-inheritance:

:mod:`rate_estimation` Module
-----------------------------

.. automodule:: spykeutils.rate_estimation
    :members:

:mod:`signal_processing` Module
-------------------------------

.. automodule:: spykeutils.signal_processing
    :members:
    :show-inheritance:
    :undoc-members:

:mod:`spike_train_generation` Module
------------------------------------

.. automodule:: spykeutils.spike_train_generation
    :members:
    :undoc-members:

:mod:`spike_train_metrics` Module
------------------------------------

.. automodule:: spykeutils.spike_train_metrics
    :members:
    :undoc-members:

:mod:`sorting_quality_assesment` Module
---------------------------------------

.. automodule:: spykeutils.sorting_quality_assesment
    :members:
    :undoc-members:
    :show-inheritance:

:mod:`stationarity` Module
--------------------------

.. automodule:: spykeutils.stationarity
    :members:

:mod:`tools` Module
------------------------

.. automodule:: spykeutils.tools
    :members:
"""

__version__ = '0.4.3'


class SpykeException(Exception):
    """ Exception thrown when a function in spykeutils encounters a
        problem that is not covered by standard exceptions.

        When using Spyke Viewer, these exceptions will be caught and
        shown in the GUI, while general exceptions will not be caught
        (and therefore be visible in the console) for easier debugging.
    """
    pass
qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. changes to make an overview over all changed/added/deprecated items echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp echo. 
echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\spykeutils.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\spykeutils.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) :end spykeutils-0.4.3/doc/Makefile0000644000175000017500000001100212664623646014276 0ustar robrob# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # Internal variables. 
# Paper-size options forwarded to the LaTeX builder via PAPER=a4|letter.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
# Common sphinx-build options: doctree cache, paper size, user opts, source dir.
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest

# List the available build targets.
help:
	@echo "Please use \`make ' where is one of"
	@echo "  html        to make standalone HTML files"
	@echo "  dirhtml     to make HTML files named index.html in directories"
	@echo "  singlehtml  to make a single large HTML file"
	@echo "  pickle      to make pickle files"
	@echo "  json        to make JSON files"
	@echo "  htmlhelp    to make HTML files and a HTML help project"
	@echo "  qthelp      to make HTML files and a qthelp project"
	@echo "  devhelp     to make HTML files and a Devhelp project"
	@echo "  epub        to make an epub"
	@echo "  latex       to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf    to make LaTeX files and run them through pdflatex"
	@echo "  text        to make text files"
	@echo "  man         to make manual pages"
	@echo "  changes     to make an overview of all changed/added/deprecated items"
	@echo "  linkcheck   to check all external links for integrity"
	@echo "  doctest     to run all doctests embedded in the documentation (if enabled)"

# Remove all build products.
clean:
	-rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/spykeutils.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/spykeutils.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/spykeutils"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/spykeutils"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

# Convenience target: build LaTeX sources and immediately run pdflatex.
latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	make -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." spykeutils-0.4.3/doc/source/0000755000175000017500000000000012664623646014144 5ustar robrobspykeutils-0.4.3/doc/source/img/0000755000175000017500000000000012664623646014720 5ustar robrobspykeutils-0.4.3/doc/source/img/signal.png0000644000175000017500000013351412664623646016712 0ustar robrobPNG  IHDR7bKGD pHYs  tIMEO8tEXtCommentCreated with GIMPW IDATxyx?ϬYFB("* E*K}Zڟ˫˫TkmuCKZZ+ "Y2&&L2{&s]̳s>ssK/,4MTRRrOaa!YYYX,l6B!Bv|>@Euu5UUUˬVF06WǏ5ꑜ7p,'B!D\. <444vVcE]!C|X\\gO>ڵKVB!"7x;vym>o`~~>gu4innB!άYx7ذaKIIɋ&v,$B!D 8Nuƌw|>(B!D v8[,999dgg2B!BAnn.6łiw?Kxꏸ",J[\ip?^̈́/Ru2q!ua->_4[p8_65A-ˆܙXz ,vK8@~p8Z.mt``#n$5s 0O3S=xWA9_Vw4-Z5LB\v9G B$k!Ż5|ݷ9~Ũ 4r;/mߝáSRdl8\ 붱m(1ا'{0h8pvjNl*|6{e(jqWa͖:rPld !R \z<gKn]Ybn_OF(Cm+dĔ8lOKdeS8v( myw?MT70QrPοyś1 fỬqCc{<;X ;f~vlYh\ ̻=sĖb(Z|/Φ8 zd91%vAOdL| F0eZ;\BEA>9q 2KCgvoe3ϱnO||. \| m98އ ,yvZW!V[5Bф9f1Ugٮr-RwzLnG]˝gd~voNٟ(pTa'vK~Fk<~~V~1zT5FQi)]ȃ헹rů.alz [vUyIM-!N'x"~)uuu,\@ 餰#Mx;j2B= 66~ٰ0`&Sߏ?'%?Up\L6>ypL<q&*ހ gT1X 1>Z'5-УNy4zy kGY[瘚47z 8 p5|ΰZ؂݋܍Y&m. 
_M;w(ϛ©N>ϳ_;oVbڐA¤͈fәN< :[굨Ԇ+Z[|̐6s]Mڕi0s;ܓ>O]%[ꢍnjk`I{B$Lnnn'19U@Rعs'uu 2 5D84nM ,{YQt!"xk[c2@2O')0pC!e+640tt;VyJ}3B6Rv)g<\M1;fS3hvF b(IODw[>8GLagӊuX840ٹmeowc1P( !bK/5k} /믿駟k{<VZfo߾׳b F"Uаq9΅w{Fv^,M4 +ݻl/#-.,kΊMm[zBcNd*}?-!V C?M^6s>qi|NC4lXV~q%X@f)aM}"QI;3%fàYC03kK;?`8q_g׬.D7K˵SAuM6Knn.ti|r`Ĉq]W+ +f=˷|z%;wߵd|YaVK1\~Mj֮`o40dxaeǎa@kw=^{!L<v4]a2|3fwˉ}:I(,7 _3N,!Zf"M}"ʼ 8~,|ToKal#oV?iaU[M.7bH-&\6nMMux,Bd:YYYI}q„ rAQSSCmm-|0vaq_ט={9k,yL(gJ@s%7Hh[~rCRhOʫ=l J5XamڕKb' l^F8/)8?0}4P& x<<-__XJLˋ * +E!f5ݻv2o (d' m2g^S.9C }Qfi]"4*e"lxع]7a}mE(`DcgoPհq}Y U4 # xv|W҄|7+I{۸ֵlך!Bd2?WNl6ܹ%V+%%%KX ѳ?2WZ@VZNA휨u/B1|ͱgc!A(_J 0UKl !Ř={9sLYB3_=˛ijnsG0p@v{Zرc>5 %!BHXw?~?_|'OfȐ!deeaEyyyv ƌ#a-B!a= TUU1`N'999a{ h7M#}Pk!B /m6VՊfk'؝$uV+@@Z!Bº@ oAl&@1B8n+~n{l$"9PYBHX$zq:a+qNT=#aV"\+ηB*kB kd~?6 ׻ovu $ h=^H9} :EPYBHX$zZa3 uZґXᎍ*"5PYBHX'xC1&c녕Ygºnp9h I^dW(?Sfnjʕ+3i$v|8Ci!6ںMUMd9X6vl+g2[X6nws nw`: k!4aR ?C9+4χiv6UlLԺ\2ד;Fmno;[)$( j4ϫ^Ͼ[Ivv&LQ qݑoc˖-cܸq~ H|8n`_j#+xppgsuץE\W oVLӤ}= nY \& s(CƧ-66oM71sIk뵶G@ϗaBѥg|p)&ޅ:}G- 8vMC@ bI:VP|vmV(//o}(xiDli?AQQQcp7&]coqܑGj._nwY4>W%mlYUVa{SSMrrb_#/O>aŊx(((`\ׯ]ݥBja~풞`az;`~-̥|2,v=ȩ"*HP\{<ֵVCw}[W1 ʺug}ԡB;TP$c?y>9u7R9OgH:SAz"V Tz] tb rg>Z,8||qav(?~-[6qfFI~B*6f``O="mZ~lV+;em4qZRV/j0]3 /gV1M3'_=vz|G{^lLe{SOQWWGAAA\s 444 DVV<\zZZ!a$/\t6yyjzPZzԿ;vSR24ݟirs OvvvU:BEb, MmmmKˊy)Ia0f /B÷eψID0,8VhQaOnj, =uuu\{l6[GۜN'K.+B:a䒋^ae>alJ{MORRҏa4vdҤ |"\} `!7YޣaB/@E3 ӄ\n0X,;ݑ?bᖛqsIxr+'L+6a ?W_H%Cݠvo~/]o:*;>,VlGnܶ?=x>yjV1cLvN=4,+nw3=S?;\~e98Kl6jkkz*//K.୷;_~ R!u[|8vq\pI?dÆr[_III)>Yn=9}fsg,eԨQ1!t"aqq1 ,_ k֬!?/ںZu9@VXСCc//?'o7zngw0tPLdƍwq<ٴo?$rʈ}Wis>whȱwRZrmWm5j٘noI&>_BQ~1\SٴwuXN]vadصeK墹Mss3͸\Mijld}~ꤧ1uv݃u]rGw&?BQ:~r}}uu#6aiSRFXKn0߿j7}P][Oq핵X6]A49w@t8"??fWn7,^ƍپ}{9ų*jd}7Ӂbȑ4{Tn$ bFnRڊX{Sۃ#7oF߳GJ̈t|>9x㍰;feeqꩧ8\ IDATCv$*}>v2cO^1m'PXģI XӄFodڵ47|_7"}^/f7۶onDȚѣ9OvWs=#<Ԥ@Bna/ʢKt:8baL?vJz2osW-nO3 Q3ui|6nd5CnWZvb8N'eeen|>x˸7SNv-5~7P_O]uZJ8jk뀬1,Q}PTZF  g϶vq7szډ{ 4c<@&LLl-[xv 1ܜzku.+0ι^>|}M?e=>׋EAaҸFp?=OMW,N:.382^e_N'wSLr)HMM.{~[ng„,[O:;Lt)O OfʫoR[7iB=>~8֝SI]cl[ >;3_W{x{MOy8}7 fSX#+9/!ȎPis 
z뻋NvU)*5g7rY MAN35MSOFd.l6~!sCm3䪫bʔ>>@ ͑݀aP1 >#$S5}xFOp0jԨֵ={||1"e L8+VaV+|999;}هI& qͯBza߇gˀ䢳ض>^ΝL,x:cfZbXp\lZa 1M8YhIsss}j\]-ƍog̘1s=vm Xw_\3L=4|㇞KvN[l7q7vOX'ŧCFi!cͱUgoQ]606QSm[Jx?_~u?':`ؽ>/GCf_Hlz`'#<#ŋ3m4:N8/Sz}}=[nK/QRAL"!gc9-[p7u{rss9X*M=&\.O<ѠX,l6N'999ĵjF(;wdĈ{-:t(1HTWXzn^oo=;vફbرXh7pC̍YpWȶin;0,fpIEFʆ:o%^iӦeD޳K_"<2\S68z5WjkN.”uE5b 807;;ł# SV!,X;/86nbEVVVZ[ݖ6*Ӊlwn7ц1XhӜnQJAo0N80m*UODz Q5딳WC iwWhllL}Yք=L~:ă6(Na`Z#ߎKG^U-[ 5!H V5psk R !$EwtIol)Tք"D*HW kMIol)Tք"͢aÆQ__/!BHXwO SVVã:=YMvKT !BHX \..KB!FXd!B!$B!B!BZ!B!a-B!B!B!BHX !B!$B!B!BZ!B k!B!B!&ud!BI'ĪU:W=B!BՉN9w\9眽l+ǍFJCҖ㧂t+OJ:X#ԃv̤6'*Uψ\NSUvxWg-2WX8VfXnCK2L|ҽ:.:8;jh G4KwJzv9F.,%}ܞk_X>!?ѡx^o=TW> iy!Dd`C(2 J \" kUk LXAe]mߝe<{?3d*Iwee#8oE!߳9_+9d[iw?΍c;Cw jlJi%L-]1BYQ&chOgHGUJ:1X# /p$~T+- qcY_3⥗~A}8;9[o֛D%L`[=S wcICSjӌxz%:;?WhLݨ(VmL%CkŻL"u]OJ9nʕJ$DznrT+13h:ݩ'=#w'3S+b0)=ֺwDۋ$H#5~ *!mc t00F!\YH!HW,QmKCOtD|mGǙi|0<^̔H1H}͵єH}o,QEusiº>eb+,JW.kZ\N?~z﫬n"=X2xKTt5JJCp:іXȏ!A=F,4$4vE]ӎcHMA'sNʲ/6(ֺ'^'S:mzy^̫^dK°SFy't4 UOl8퍌Ξw6=#=NLl#tXN"+Z":;o-ܓY&;">|F#cXS m퍋xFbK2vLRFb9|Iyg"=U3_)3|8NFE{T?[g !a3}u?;1F%p`ô3&a_޽CMu!TqvW#h#nɮPy%, ǹM|,tO归eD@<@4+LSb*GW׽"T&s۟QlNKlU@3.;Ąࣘ츍70H'kCW2"p絧2k!dт%ˮ=ALf5BYT)!JX7.OΓ H.ՙߕs t$ 0WPz>[̒jlt$QĶ4B 6!\aS|<=x!rGzgu ?{2}]k`q) l3뙙q%UBZ!Bdc8;8GOxWxx s \| 3VxFc-B!RI_^Q1\yrp3^~ 7B!瓤-M/ockgI~syJj۞G@=B!"sz_y_8387xa&9m'^'G&B!Dkxsfy?DnY7VŠeZd| i!B:wwΙl{_ȶ矴l{yzB!D M;YbD|N&?csB!y$[%GXg-﫧xF8۞:|-HxsKs!Bѵd֖A,ڿWD sX'Ǻmϣ@S?B!D*t14Y`hk!Bº?_YM/M\&''m~umϣABZ!Bd9[>d[3'B!/ %2:B!"uxx!~VJ\mNc@hF^6 ʘ~:9'CAtzB!D^j7ד3/ $v ~(3ŀAO=2oKt>/zk2#NˮHD|&zSyRyo[^̫^dKBͽ5kV;wnϱ/'}GyxڦDJCɰiq'e!.Z'۶K)K1Gp'u>s]H9>SmmCGMR+7M3Ƣbj PX/88K+/ey4mb"\0,J4هigL¾n9;{em-iύ{sg,BHiG3ҵ9眘M|m#]3[cӑ}]3bd!<|ήkD*c4x`~b)󱊡6چ+۠wV'2RyI'_"Rn"h~4pxh4?X;K[e$qK&Fٙy̛ ҇À56ߨ`"ߖxSi;ܘdт%]鱎i:?J"ZY%Q@%T2$6ϩ,CHqhpED+x㡣F6Q̺3ěxel52Q%AHbi㭻ǓU'R܅:| 7?s aˊ jK89yDnH Mu6Rg~em=hgZ&ZbUNfіxҚ!d54-Q?ˋw{ݺutI{hZYunrL۵[ICi1˖j.Cc,]U¥=) ^mޣA=64ݡϏ70x%ö M4~P۵]7XHF9T#P&L#baGǚX|z0wV/"Ѵ'#sy&C뒶WxmG5M ;^n;ёڞlE"NNFPjժ`L g3׃-'+eFENCBG ^ɣ?:~+^3N9qGqj24RFw]'o 
v4Eѳۻtƻwc׹6SN`Wlԓ*dݹBLG3`uw]8)HNrZkUtpT)v~_5jlW'#eCGXzKFY$]A$XUMg}FXGUg-s.WjsȶF~fwzx{5L{w!$`uό=dV3WysTںQPhE !QjW3 ҲΛO IDATU(IXT+.z"\yQO6ݯ׳g;N~&? mM"BŻԩ}zy^̫^dKB6S4̱s4iHV:y*΄3VϽiKsێ $`ڬD.B!݇]%`Yfoλl+ﲽ=;@SAB!B!BZ!B kr+ʻ+ʻ.a-B!Ӥ.w!KX !B 1fϞmΘ1CB!"N,Xk!B!B!B!BHX !Bc4}*w\V6c+Xgހ׿=+w60\r`!Яgo}?rrbǻi-o= l 8{xGizYo٣8_q!i쑍>ݯy⣕loR{x!~VJ\mNc@X(:y8/4x MW`2;ywpe~Էe@]Փg7 x/8)Q+KpS0ˀyj{8/r\>LӏQRJUe؃ٷ$.PlyS0eL?m[pG G (|B˄MUb[|CMу7a;t"-;}/U[isgy`{0pMSe1~8鉪n'gp_- ~ɪB P*JBókKnRjV8_74=xL{u~-w& K$!ahy d:J-i{d}73 %=?U_/c˷Os٧sw;pu&]dEC)S0lN<Ѿ58=wX{$쟇a82xT|jHey4mMsV S`$Cd"\\Clb͂w=8Fd6/xvwQ cco w2)o?~h_֣=_z8ӯn姬þ Ɨh,GՒ MeX{1 X:laz*x = ]LsCq_]8MM"_1;z3ּcNa[~__Oy _=ٳ3fB!Bɂ 4D!Bd a-B!B!B!BHX !B!$B!H*ZXn B!8餓XjU\Z!B=:c݉1Ν9眳c-cq]HiHVڒh̴v{ޯ|؎uvn#}jxҙTO㉑Tfշ>+4޹zuk'?Y{$z9I2Tihۺ춾O&!;w扏Vq$׿GN. әY˜˯QĴƱ)KZ*..P kK.ÏϪ剟r#3XB!"sȌ9F.̾%X3@sU)B.'cg;O*+͸.3k֬0Q0 ^B(ޅ|K3xWkޗ㡅ֳf𯳧}NLs4iHV:y**&} uT"ܛc$yO=Yrv0YmV"wI7{lsƌ!??Ʉ b ͟n5k,ϟ|VI]ê apҥZ UAzZ$5Ԫ ѵA2eU00M3{uUAYz`=z^{j!#.8J* J-X xȄa.҃{g߅P=/xaȢKx(k_A'n3K}oizӑ!;Np0ʄB ֹL% K%Xg\[Lzff\wWHA〥KN)Lt|ǒhA]1y }&ÓN*{EDJ~W#(]`iB dtۡ ~RY\^Eq];Qr{pmɤb*m #D*cHz^uiLfzS/x,[ >|ϼ2D=i{~w=zrx{ﻗ޳9McwQytNp@ "*pAY<YAYuAdQX! 
*G r=]eY]y\wqqfz~xLW}4;Bbl=FX'~Zz#Kd?Kd.1p Gheiw3l;:w`*G{*.TG̣Vo0*gYq‘f?+"v#~#6ie죭ѭ+z5}*42Xh<_-rW Ga{!BE ;}8Сϲ@&\r##}ݣudnezڋ/9ܗ *O֫9#vٱ/݊Cg20wYz ty __@@j>D}ZsW r}ny蕅g)=yEEKc#`#%`u f}1)pm6SĉnB0K}L%hCPNa&|8"9!S&KB Lg=u@9h;O?d.Ա%cck/ 1A%LEvwm ƗӏJl|I](;/R &Uxy ׯ´1o?QK0f{غu+nY-Tq~,޺81aҨ\}m#Q;Ċ둝/_@2ioo[fFb\( KF~Eg_H0%*9IUqǔuyr '^ڈoK^udO@a42DE>j^HEF322@2{%ZeŨPwzcdUmC" d Bc2ޘhdT9F> &uytۀy;ѯz=/Gk 6"3ef>9S4y߱Ɂ5Pk'aV{m/2#Z+$0"+S2@# Pǃ4y_ifEdG["Niğ5GQ+C&)U%tхt/=C-2c !hذaA3ܟ\k諍Z0gg<,݅bm \OFdmj2fQٰ$?["qLvtj} VYw;wGfٰ7VA-ӊ]A*P\Hcuz7t-G8ێA87?C])YX2cF+ˊlYK!G^g%d.e %֥a]qwIk"%F<7s} ]^LW(Cع[>^+ j(40S'vEyu-}r!pLĒέyțk̙n⾡qg~xq/K 2atJR/CB!%Akq.l.=<6¸!Jbag RB!bQ)CuE-X0lW;t]o(B!0H]8oC{4 e6iI~`"M!BXמ×[ºw7!k0o#%}oa8r֒B!Xki&"]G׷n O/Ǒ;qǞB!bzpT9Ǟ5ՄB!unL< ^Ut "!B~b3 UKGD~r2"P&҄B!F-PXB!'aiB!ĺ E,UPF?i#ͻvUG|X[q3bB;A8cM!BlXۆ9+bkk{f}z7/Im1!;][ՙHB!'u8h9wEAA oo%@"X\.(BABa?E|*Th  ^&W| ŵ>.:5OàAKm˱z\nuefAK=n7Nzv_()L3LyFUm30xlۈޙ~1}g[gee\?oذȀaL6l0TzQn+:t\ez5ZooXe؛ю_?: O"{mʰje،v+ɞyXdL+$r0Jkpt$<0t(}sFrF"ʎCEqT$#{l5IA(נΈ.Y=>SKj?w;P=l}]Cqb_ $#׋';;[X#Zﲼm_j﯍u3#.5݉(#mrEK#-q謩/!z爕Z^k߶;vo8EU5;Eܣ*CL}cxev_E`Z B+pE27Cfl_l͈'/tpAYq^.rCeFlՈ_Prrr\Lkpts_Fn:tT~\Z묬g }.M}}רMXI~CC֡;}8СE { WD {eeY=멥M:G{[uw"@6UWZЈӗxO<<;{Yej-Ft(+jէ_xO3eG"v+(C-2և WG~ǻ\7dy۾dՑ)-An򖍚,|+ty3LdC>3LԻ")q0kfD~,7(Ay50~6ſ*vƝQA[s޽t2ǬS Gىul_ u9~2abFvDou+Na-nPXGMΌD6<Ҝ`\sIwge됉xg !BkB!BXB!ĚB!ExB١xyN.1GS N?l2yiT*qHe4 m _WFxj_*}*/;Ofl -`/Q 6:9<13Oc竳8Y+W"0m|rLhx IDATw̌iGT?!f2_ ƼDqJZ5>ahz;TR2P|+܄ Py1>=6(ർضy5є~S,^XP,ڂ)[=`k@c{ͱm,,\uo/ϠhTۅU)PѲwo|S6N Au[s\(MqsTޝO?ЈQԝ_w -F"N( P\*FLT$DR6Txnw#mU\s_qGZ =\ X/BIVH$ $U©rJʾÖ-qtkS6Vt)^dzc\%X;CoLQVWo1m"mLBGO?y"Q,Q9-h  bz^1Ic!:_=7Oӷ'oVoVNR}}'Uk 侰_Q4vN̟l¶-[j.~kI" &^D4COGQ|j8&W1r@>,rq ^bcjq}8*o JaɸD\u{S(QH kpq-cc*v`BW>fwBZѧ8/R9-CrF"ʎCő6*GER: ΰ0euz7tك-G ?@q۞hG8˼Ix+j4^~r DKuflڴ @VxW1Z~֚Ow߿:ZH56No>uH9O9Z jp(o\0"H; NUUpm_&`\cMLKxn{ս839c6:(]1굛e3D me0X0{,_+z2([=&<'ݏwj$ހSFhSJјe5t@!,?cʨBz~DjqxƆgWff&eO!BNB!0&B!5!B!L !BabM!BabM!BT5B B!o߾8p{9cM!BȥVfnn.y}}_dMfE%.{ٞex?6sdO^_lLJ1.'ٌxkT"Kkg<Џ:ne3άES=u/k$ֵYX~,m7L:#xEPR}RLhgZ: {B'}% hs f-Ɗ߭ 
|ml+Yl'B!k\F\(Pnn.ġP;!NKPukA㉗6dee ]D΅킢(T,!$::mƼxW|_uVVVj,g+Y)2* eɁß}Č-`X'؉>999Lk$e-/E/*G%z-b۷>ļ6sWEQp޽+HwQ.R; "K/v#O "_ kƮ z*{o~GS(*MAƱ@)/@e84KD΅ vN^Hb]=/=(+#buav)B. ; ԉ],|=;m4&"'o[ωsHbNctS>xau]%ME_ H>UL!$KAw?G5jb'ݛ`wP{~$yy7PF :Rҏ9 afb~B >^i|I={Ŷu·n߾pxvv=,mr[Öh3e`fy@ _$vx- ON VQS}nzt]*s[=Ȩ?}p)(\V^ Ww~ߊ6E o7ʀP") RB;WB{cx&ֲ4V=NSQx:ޣ8=?Z;:ԊG۪]"2us,u(>xi5ј^fQF>;}8СĽlBD^Fv_Hs:;ZֻvfkՎu߯(@u.s;_:g=ʟ|E#Ri.q3Әzw"f%ᴞkO !zTZ#͉VTSp@Eqbs5=f /]YyBc?! wq<DK3ęCiW;/{5wVpl/BZ`F lvVa TG5l9iRc8 1cVEf](;=,#& {8'WB%Ȝֺsܯv X0N璙0iGZf9|]g8wpf hKfd k 4_XX#*CeX~ݍy_V=lݺ[Ƅvq!UbXa{vЛS ձ^t`$V 02uk-]'gjձvm(:`֛$bJ=r0 çp3# f`$eb]}lvW݂7 BG޽G8R. bldv|A쑪/ܛǛ|ىZGNf##ony{)ߊNs9 To̎iRe f&WjE$h}{WjofHQ]s1fLꙭ{k$PR?ɗV|G%g 4N:5OàAKmؚ&3(;UӑMltf@*s&Ho"g$;Ȓv2g}&FC/k,_-FI$h`T$SkrbրZ,IAJFeky=dZ 6wf_zL_%#4e)##>=Q d Gvrϔ̞ h__9JJD[fe$Qԋ@~'΢25_me?2e%rdV?}䵙mT.sGIDb ?Eqk8&Wy CFMRJ5>3 _>{Zds7'o%^>xSG {^<3Tʕ/c(z!7w-beKHPvgl G#9!Xb ]baѦk,c"@xbhu E)h8>=eOmP9܈1X~wMNp}C^'̝3-t!{1qZg\aoP{ 'и@L# !dX7NK>x;!o1gr ǝ ZT:/|Df>NI }tU4!Bnb a9F¶bl?!{R y*v6PXB!K'֞Jz+dm/`üj1J }CHB!0H]8oC{4 e6iI e69_ Ĥ߃N)bpƚB!X7=/unBW5` }a3z5GJ,cqhxL_|#VLD~B!ub]q[鐹xNho޶c'ޜ=G8R 5!Bnb3wJE/{]=WRB!bMxxy} 3V@7LO✛;?B!!;XkGg.^S֟ryyFTc/@ӎ4'Bq2:MD2o^#wV 3=K?!BD cr\%=k !Bݘ-e 2a:H[_sHB!'7c;P{D4''#.: 5!B~b QcK74!B~bm*}VB]$4Ucnqp%g.ńv`5!B}b]sl8Efx q[81,_(jB!0u8h9wEAA oo%@"PB傢(!06B5 >b 5nzP\ $XaXWm"4'K}(&$Ⱥ)}Mm(qJ>B<]FiKFg` ؘ3cPYYY"77ϞlذL ,:xm}nu]*+|ZeكsdB2Rk,|g@ѪUmT{͈{mW0cCU2=HOܶK!Y<эС:a(;vUŁ3Qdzh$ _;#e)۷oTPvv#<0`_Ck}Ik)Nly #RلרMlF.%q"v+ #EFd(I6Cz|hB(#>W&z@?Nd JDm;ڇIQ[4*G BƐyX՘ұ>i7a׉cs+$*@ͱb?ppg*>=hꥊX2@I(jTz:uHʍtfxLȨg\E6z;wer4Y/MLiIMNhMe2Vv2N̘dӓ"kvzL=0 ;I\|Xpuj/|܏~XYS'vEk[kRKd^#l1x)#igZGFm6+y!^hA@tfKأַ{Fb7C2q"{BF6+3fm1ieRRo*d ^洰,y% ~B{ 5P *!_VZ22|G.~1 ޲#;TPsb=̤Ȝ鳒LP|(Xm>Uì/ZݡB%DЭ7 O, d Ɂ F[jF$';et8wԌU$˘ bzO[ 8o!F+Zw&XIHkc Gq!u89 ͣ>ք0:Q>A"qq%x>ֱT2/Ң~?OjpG${a-?ϸnT0w<+U@u_ {2ҿ;5;P)zlt嚟o >"o !*1Ox-܏ţa6dZ<cDz4p=eRδ1zZp}|ٮ빙Pw; 'Y;Ax}y1 Ư܄:n;cH<_}--)=WZt?ͼ_ce='1K!I>!184TdW_Xa9Lh{ľr B]oҽ%f*:#xEPR}RLhZktu+Q.r[Y50kKA.Uާz;.Q,_(Z;sBhaB X rV߼d9S \#ʏa8O{ߴoyrŢChE23{b%sgB'btۀy;ѯz=/ 
%>V9Ҝ?[G"uUOQ3#o fUz*f>?Q_@G8(G~ϒYw>wtZ#eO^_lLJ1.'L#X֑ϛq.H@_e3άES=u0:\Q5>hTyE)h8>=Ֆ uOBזZX@/bʟPpB1ubW$+4B!Z,ݞ+}OpFJ>xauVM0=N^ !Xcm= }&B!CL !Bax|Q !B!pƚB! p:Z>'B!L ,S%UNn;e϶=ζ|{ B!&քB!0&B!5 \Ķl;l;ζ3&B!5hPz';XB!@Wff&%A!BN9cM!B XB!ĚB!&քB!؆(msrIO=?R8qgeORC-̤ahP8w27SkRWyl]4oz 5gcVhtnys~bqסIie~;_Җ^ E,UPF?i#`fL;o11aUv7-'J\#Pz ߴC7`֛ߡ˕Xy&H`XtʋiAymŶͫ1&dbpu҇"ePMؒ493_$]kmÜg1`Z{e~ }|uF/.쮺oNA{#pq :ۚ(@m;P4lڇoEF" i1 Dv6Gm:EUy5b"!puqm8\:bN;ҢdGHR|JB%D%g jNx.pTUl9[#ⰱK85R(gtL}zc268€!mHoc:bxȫ.xfqO4iGK13pkH2ӔEQ,y2=}j}7 rPu[8Zl 3ugwb`mيTcw_KI0"::KNVS|12s'g掻IS3_ñVa}0h N IDAT|VrK=Ev&2*ĵDۛ"FBj^h]sk)SYPX4Ҋ>A~i3Qv.Qq8*ґlp)ӻKl<\e8D8Ԝ_M[QАS6' W_6cӦMشv:'kl|ꭸ+9ԢG"Itz[CbJP3EQ|s4RO7@Mu"\k{ F7kobZbsC0wܷЅثiq}DqBPD,y'Rh(n! ǂcJ5\ףߔ)@Z0y<~S  2 7pER=/=(+#buahSFbScem$R46<233){B!Btϥ B!Ȁ5!B!L !BabM!BkB!BkB!B}RB!}ŁtkB!B.%F0sssۇۇ n2(,upٳ,û<׵#}jϼef@(KMV$tT8f|[]Z8]~Qp%.ٟȈwf(}Z{pZ3N{BB~gbM!Bkͥ{BBgb포,ȹP;!N{-8ˋ4!(EAS11Xo߾5s{EQ(P݌ !Nw猵 |M\{љ';gi0?𚱮=<zgϱv2FD|QNԭ~Pvp?)a[G=493_$izXACʗ}cu0c "vxdX^KA\FsDjURaÆʯTPPՁ@F{$2) ;bV~5޳%"G(`WYL y(a^X-$ vW|Z@"WFQ˒3^w٢f"r׈N"P|_Qͯح ;7ڰY֋:h)@vh j =m ;fPrF$։3,MWfffxee-/FT}cg }ٷ۷WDk<s6@>ZD^ڤu ϑ,[W>Wg=㠨/Y/dG⭈/_3dEi>W|wuHV& BħD8.jW+R:8p@\ݞ*WBd$BfKAJ4!,9CCQzwa\xp3;c!dkV5fizlqݿș(uhy,yj}ET}=C і,{.Ŵ>I](;/R &Z1#z0HDpqD@m NrC1d̜7SFzg{zu`41 tZRe#*w8 2x9zNTE:Tð ͘9#Fz"Ѐ̻_07TQlRde!5=ErAh-Ț\ӓ̯dclu]ˣi,_NiğȈj8&Wy CFM]9t}]sjҾkW" eug"CVxm:ڢg w%!6ug5{0׿͌722J8ـ%,]h{}~Ya+Xj-~0PEs*i،ugv|6y5 ]b`Q*Pܶ'Z-]}?*;TU} { P &Ꮭ ՚TPׄ>X'މ%[7ט39  }C -̝3-t!{1qZg\aoP{ 'и@L!B!k ۠58pm|gގa܃ݐRDABlSG 0:%)JVUO&҄B!҉R>Y[` 0w 醻?~X>D&B!aXPqr/>ޜGAi =h/mj0&B!Mk˭oaݻU n5C_،ŷ^r+>Df)Pp}#]1$B!XWƖh:d.ś-CdZy"!2S\ B!BX'tƌ-0=snn{zL!BO= )<fNj"s 3!B!Av''~]-?-b4!2Ӱh5!2c."#~B!3XG$ /Mq8Ry'nȈ5քB!j/|q'?DF&҄B!unL< ^Ut fPZU uhtc6;ҼkWuGŻ̥N}=g !B1Mr]="/&ۆ9+bkk{f}z7/6m1!;][ՙHB!'J 5ys}'ZiQP6ۿAɽ=i(g !BkCTh  bz^1I>yrŢCh+p9iB!b&aU 6wf_zL_J\i"9lذ!].g .KMVjW0LYPA-V:XID7l hָEuy9X}Yv(; Ab]'ၡC1tØe53Qv.g")kzh$ _;#e)۷o"\.S);;2D:^kD;k2BX)+%zy k$?{>Gʑ3WliGS d Be&`%ZK.гDB6E}Ilh97◲t,[EFou<|N > &u2ú<ƔH; NUUpm_!Qj}k3$~8S W)/@eF+4V/U ^h8H *Go'/X 
EKwa-k7cZjzҬD_Ox%N̊fݫf2Yr23%z[X%Pkk',$*?jOݏ׌ä{.RQ >,8:gG~oX,"Y "32FV ޣ5i8̤[oI ށf\͚f 2#~%1|&zgkdv~IOv5LzZo,{fKO'FP ZsvglNJNk >S(* ]H@i/3fwA9ٷ/IٞrwY w,Z_ g#Wf̒Wvv6 2 "Q}e` _=}ȴ@95ʜfna>Ȯ4Ջ O, n9}RcVK8uFf3)Ѓ9-`.M[w2-\e򒉵<;>1u[CݞO ڗ2$NLw#hQԋIZmi6aJ$W=zbgL?$'\SDB2CYC]&#?<_5V7Tuw9k#u0["mFm8Tv!sNTo\.)* esg#3fF`Z1+~233-0iݿP2uLn1FύNjſ+v/),,W6a["V7=sootyT](a^uU&cT<]2诞mrZzFKd~g]}=} yہgy@rr/g޺]Am6]'d_mg|qFW\8С1ҟh-C=ԗxCԧ=_|,_<,_ey>'Bo_Db~;@V5_hk~~\:HfP, iVn0 ΰob?}^(#3 x f,ZaU12^ 0'"<%@K]Ե3|%lg;qdVYއ"̶k+7a[Xcvx)KCV _rʻ9`Ialv˱֍?m&o$o+`S0 14T.!R:q`y|uI@(] Vu_]=Y3*XpI{V80uN?@w]p)AgZ2BcÿV_[ ޻ bv{BgM3_Qzw8O{&քB!06 N꞉';!N{Wff[XX}!B8|X`RM!B EB! g !Bg !BaL% <++kICUNٳl;e϶,m7.!B!5!B!L !BabMC(l;ζl;ζL !BabM8$= N{&քB!8%''ǕIIB!|XB!"&քB!0&B!5!B!!"ƅC\/bSϏFA@`eӨTv0<3i&*]Ƴ FTU^v[۟Cbq٘?b);+[^ܵ_@musxcgm٩Wga'qWlB 0kU.EaH3ӎ*CeX~ݍyE:k}7- wdl?.r%V -b|{lPky[mjL ){X=orsslY?TS6$"{.Ic0gY X~=_yA")˩> nASģeH#)l@d" PbΩ(?~5;۟ag#;-HÃ@ZD$ -=Qb -5pjQU^FHl\F\(؁-⎴((1{`3|7'R;>_|I2QHڅS. l}-[Σֈ8lR|z9gMǸ7Jw8Sޘ0bHD0ۘ;=~?E" Y9rZ@ħ7@ 7Ac( d4%B|Qu4 {noOfGk߬e߮'M':Tg;Nk '}a)*hLٝ??/؄m[b#X5]גDLNiğEqL*cj圁 |8Y&=nAWpU >’qOqѥL )q-QکZZ Tq}"%腴Oq_rZD;#mT9t$aa-n[>?:W="q5yVht74͉×bشi6Ήbĵ5z+J?uÅǑkpm}|rs.L_QMaDM=vxv;6ڞMڛ̝3-t!{1qZg\Aslux/'Qc/k7fމ!6ʬasñ`XdR W7e :&P4V{z?Ly/OH cѦbϋ1sO1kȁXuCX8(ǔQXYԮ= 8+999LʞB!s)!B!2`bM!BkB!BXB!ĚBHqЩS'Ҿ ڴi:uBNa}jA? z{ wM;x(wE-N=K"8 nG!a|7u쳉R7WG 'cԜ 6 oEIzkpoB#!؆*ߺg{Q |;u :w9Ͷ;sèpW;cXO>URwv_ @ݏ_`ɘqW/;.ԿƺO΀BXBE]m"]!4}Wg'v._[*ڣyj)YgeNl} r&{-p0\WiI%cŦƟ?L숋CpaS  b[bмO 5:5KE[0RwBFZqWcsxG\娮ms<|T?Ʒ!m<<~NuG["PQ#(gS !L !7 bpq~9QT.) . 
mbhZDLj*P}iHLGwNe2pT6:|/Jq yvh~ e G|QZwиNV kйσ0u4+@Q%Ӹ5W(aBs5!8#hsNaڴ~:hzjky] x5:4Dف.unK\@mZKqJNN+33 B-Nn'Lo\7(\MBA~~>B%i}Ѧ''1q jBRB!^WLwA!āpƚB!&քB!0&B!5!B!5!B!L !BabM!BkB!B/x@ !@]];"Cll,"##)DB!U={SB7I:uXj QYY |{8_q\螔zM񫆽"!}*!N (DDD%uuuػw/ًNv d|xIDATc(ץ] up\?I5!A}*!ዔ5~v6lԀ6ln ~`u8:e7JPs=Y_5/|ZjOaۄQXz6L_5]J?kN-^{,mW/቗N׋_ĽJƿV?UyGfbFtt4bccUp4\U)C}ce6H9 |ϯ\U$D^V ĉ:D*8}~< "uQuADnCBNNě1zx%[Ѡ_XϚ;}Xso$콀[Ho>j~y%~rۅXu<>( _GSÝHcA<Һ="#1QQQR]W=4ǣ}#5u]2qݥ7>^$~\€=G;7@\(x},HliߏEaoc?fM&bHT,D*nݛVZEkiZzjUӢTR.ĒechȂV{}GsL;ޡۄt>յdm«:0g| |pWH&1i{+y(粆xwf}Gg"X9Hzv1lpBUXe v_j@uQecOq[T>]o`lHa޸?550Yπ|35y},m..a,L2cgD^D{ؚn,c_$].*O/cf|ʚivac({~VgoșL,{O=sÜ~Uc%IÙs>o]uw]t^֜YIY9R3w];3Yq"/Tg$b|}mo ӒYjV%3ź >̽\T[W{tIo `=͖%ysN'5UH H4Xy''ٙ М9amD::V2棃)vmHb9f|k?am"0q܏9Urzj7ۙ0v2B8*ݮVHې^ Z?\C)-*')y/L]QMY0vtw|uӕ0+S+O38-|"!结a)r,۹sgwM1uTlЕnv WyTkBB]w%X<ЛKib#dE׋ 4~U*Vڎ(l6,ذl(ڶ +%-jk~O)/>¡V#jzBw w嘼MJWkDPţMHG z U,\(aDɕdUlyљIQ;9o+M/db3]}V' 5`U(NMN\_܂t)/xú2oj>>}hB@~kvi"|h(dT5?ץm{VVĚSu.+u3_] &$لYGX3}&ƔфT7j#71*^ŁZ.J\(Ƒ5{9y2Ga֗hPp4~)2[n&fzx @«u T0DS8Ppg ~j8UHtf`1ֻ*A^o)C5^T|*Yp\/8i).QQr)~kMSഃCktç4|Mی{c.Zk2֖Q|q<z%^ Qݽ)jZ+΂L>4xAptklvLX&&9!>m6UPp\;Khǭ823e6.n\| ||9#,X7]%7"/e@D|vwFUHyvT[ɘ qǜ^5B~(}RmwhFediZrz9T1qU,pR Q a-1l=3Z.@sیbU g窢V9x¬>HVʳ`B_.:acd:TI{yڠƕsqwXX:/5VJ)G<>c3dwG+ctOF6iR41w;TKy8CJђ\#\o kф9\G(q=;g2q Wl+30s=}*)CN4z-zmda)85 L`]V3v֦_ylՓ x4G] (~ͶA L~xs8w,(0c,)L1$ylηf¢\r[xʷ7Egү(X'e:pfEQlWz?G&;ՌGp}<]L3VVOUZ>-_rwkf ƌD񏞔ۜ*9L;5OatN]bo.ѦsdT"g䶦[ѿ{N]_Ƣ%9x6<X(ΐH*]h͢i\w8jjh&UChO(֟Zl^u T(a:mVlV+]CTI?zsR9i-%b'W\=96%[J/Դ)GWr8ڳo?B[{׭1|~6Ygդ]w=ڏch;1WXβu1̒?C+v*5*μ<35CR*5*bwXp`2< RD|'1s,lMllC6G9؍i[//?'|8͛\t^eoV{^_G9"%i.?ż΋ _@v&.!S1 ~N埡|Osi>ل bx`vNǁ xFQ3pEqPy!_syN]KZͩӻZ Nxz|et>/vn/!j?b;ҏ5슂=iq&}Vת[2$r1OutdX85QΆHz̩ynuPs}"=\Ww*O=,4 B7# cؔtr7FcX^a%飷Hp ΋ZxTQ,&6֏- ڠ[} 1CǛ %6PB)EIa/ͺ@5΢מ%-KE==wJ{,kdJbz ]x!r@mϿ W- _a|q|iݵ+6ri>w#ަՓy%5ǐA^^8}qɬP1R'V)S8cC7QM?T4i9sZ_sS iP _Ow"ljD k>11ktwffzӑ 룖e&O *q! 
V6mZOII஧n(2&}X(i+!O?Bvv6LD;2W8MtjYi ??lAff~y4P?*9NmڴެXתUcW_}U+UvOVpjB־,9fWLRT?;5 dddRzCRPo шv7/~\M&k!sTsׅufV6~4-~l6 ju|8j w B+$hI8k햪ZͽLv{V}AsH˸7JNQ!B!t6m7 "B!ăN k!B!B!B k!B!B!BHa-B!B!BHa-B!B!B)B!Z!B)B!Z!B!B!RX !BWشiB!wá^IENDB`spykeutils-0.4.3/doc/source/img/isi.png0000644000175000017500000005754012664623646016225 0ustar robrobPNG  IHDR7bKGD pHYs  tIME -/tEXtCommentCreated with GIMPW IDATxyxT9go@;"pZ^mmmvoڪVom.5Ekpe%//!1$0 !|^ϓ2|;||sbqd۶}u0 @DDDDD8NC;w| ۶a݀8/jjjixr"""""b9p{{᭷zXN$fꪫ|b1DDDDDp饗裏~0LZ]/^?'J8hT[JDDDD.ɓ'8z9.xDDDDD4M&Mt\~b1і9B@/x<~ٹ:hˈ mTϬV6?2{}955yt+E.-89)>`-`O L%%ʹ{Ko6c0kxr\=;m~󶺽%M6m&Kawl 2yIuw}'E[[lٹָ,OeCYm>ˉ@n@{ETZ', MToXæFߥIF[i3bZe#XFs駰oìw_ȻJ ]ء 9+`L8o] x[6sgaZQ$=l\ix4ݯVzpQl备vz]NDdk9J6jl9N!^"ZǞ9~xPܶ[Ϯyg,"Tw5fn_138eFGtjw2Ŕ"JUM.?c&rx; hdǛ'FҐs,Z| NCYt7;#'.ܓ4nymۉ&; Esd8b IN9*(eSśw7OԹS(dʋfH5᱿ May$ZAۼ2&ʢt\N n?Y*oAnskkHr]Af}SXQWmegM,ʦ12]}swS81{orrg=)H qdǎru .[<kϳ]$oaU Wj8̶ȈpdR?Ə?#i@q mM4%8({I{7α,˿ٝ[BQ0Nþ߼(q B4Xdf OnU1Y4<o4vCegancmKEݯeƍCq0LS;+{qPbWS O4k|Ξ]"d[Cճ5NGHl܇M};ǺǯS1Om~ţoD]ٔCfξcoܜRM=DQ0^m'o6crH7ӴM$={^14tc1оvt2F{$,%SꃆH)EM4hٷ`$dGow9Hf[c}>D|dbYwfض͞={0MZ&N8b-"GSIAwGo{'R&Mebo8k~0 ȚŹ[D I$c ړ\FY\t~9fjZKylR^Ƿ W3~2s[xdx߬lb;gw(3"Na%29 4^斍'h,ni}dҭ.3/$gj3勋<[kSB$2w-gљrBOS_xz'ľx3ÄjfeK͙sST?8dFAdloH9z%,,6I4Aciۿ˙H Ov%i|x Qb-4t[| E 5ǣz hO 1vl.lpeY"2"^̙3ٻw/x{8~Oyy9XlZ{EZ/"+ku%XO|1{k}>sub[X- 9E8%Rĺ#HP%^BOjGc2D6-@ہ=݅LM6|s7;w L=@},moo4;'ä}Ym)14x$&4o !z1ImgAHXVH܂^6k&N$i) J8<}=lVvϙd;7[hSK֖I7b~fpƌ|ܐ9dFjdb 6>B+y<=.ca!p8\ :"m۶~?餓}ׯǵ=֯zϟ?L0Zba(++#X X;[pg3y$,cFv5K'bXߗ6䪋f4R~VH]_Ƽ%Akl%L[}0ޥ3z{h \nDhH;tǞt6H E3O~R5fG.L޹Nυ p)~C4S r8>~=oi߼_i&MfMq/ۼ.XKqpYm٩fF 38oOt&̚JnkyN&n!|DO|Rz0(,/hhhsR $$͗:nxwp ƅ7íE>]2˲2rݖe{܎m۴a?$dƎ5KSZ"3w>\rB8Z ca9 3s7xRkh2ͨ0: p!0pe㧍8쨎 ҡ:`L>W~ZΚ(ADS-9ݵx! 
wlx,Mi]U\Тgu JQWN=F"!h>#se-zzƀ,[6c;o©X;lhۀLK:j6e #0Y'elѡpL!hm=P@}}^ykO'̩S!ٲ?er0 |-)ܩ2O.|ز{ύÏfBiUkX`A777>ӯ!w4mYMMMx<pD"ACC]E~PZ{E]8NHc- u8~&Nc }]{d@"tPKrxRxǺܓYP2&b{E &Hzw`>IX{X?)NWqF9}859ìiZcIWƄcw*z& :ɤnCp a\Բ!Ύ'0W:N$%i114H3-qH% 5r2'Fl nR,ÔlHF 2%D+7 @6B0P oaAOq,b)[77Ny14o%*.|>oy([HE:Qb&[EKgHMdOϢS) )OcgEғN\[nz\9m-"#Zg &L ''b"H")//Vr***M$ٻ/e\fR +'B[g6<+y^Gg_g+ؠ vx),G R-kO;<>n2٩fZ"\߁)/}5-$ <&X$46X'XHiI9~ds<$0k9Ͼޗ[Dq~n!ўS'Fzb(?l0~\..R-ނuA}${G~;/$i=JiA|.4H-a:IҀFGzcmFe~eVXXH*k\n\.. ݣ,I>yz^˅m*""""*Oym: 8ضo.½W:%3ي&"*֒aT 닸QMŸ{!?o># ]m)zdeYnR!y|wݖV-E5Qθr eyq/f+ uͽ}xWo ImvΔcr9猈m?{l6o<"~g;Rޗqƍx2;%ݖ.[oxd2y^+naXuH8x) .awxel>b8eP4a{|߿;uvusd6t: q+"rL7l5Yh>cXmf}+ >˓@{{;hۆ:n:#~޽MA20a„Cw΄{~{7.,Uضͧ>q pw,ׇxds׹#cڵau^kk+W^yeR4A:&Ng1;I*vz${-ƴӘV5|A2K_:^ۍ'++ ˲xyXd |:Xr%++Y,:c.5H&S쭭J%/,cv Nv.YfΝtS^63d;˵8R)LڣY)S̛7GCC6m~k8NGS'?| &/~_7O*>lذp8uo2^&n T=5~?>Ef]yX;@"eˆBvBmqxrx+N%m:kHS[[3{MsuuupB:xq~K/"bY .ϫm5ז=FęXMMfΚU`4۷ogSg+Ƥϐm ƶm\.WX墾ŋ&Q[SC*νwY rˏ~ȬY'՛Qj4wSXCb 7|{-|cC@z ՚\3=RX~|'fpaTt!;;zhȷS*"{Fβl>873!5kF~Bst jQG_;^{}/8eX\X_OJ4M8~4I$v4 |^oׇ.;u/9Ki|8쳸3݄ 㩼 zgˏo}_/zJPEmR\!?>X{#:O:cH8L#4qq{\vİr):e ) LL9Cr$[nٳ:|f:8azٴc|8i^Ofw0\.vMnnnL;n@lc\wu~@UD+֝K~J֬=׎ k(..!'XxdE 1 hl9EC/9Dc1>?`K" L:V1_̛w*篼D2 Im]-={plcz_|[~t3_CV*^1hmOX;8,y3g;R4D "i'nMa$-)!;'P-~kV\x!K1MDҏo$|G zvzțݻwя~O>G}d2oJJJ4HEDź{,t:/Gk^cCv0hm [tc].d"I]} ulFiB!|^dQ-v<x|kECdK'+. HEH;>`0ص\aҤ,]7hdĭ6 K|_źsGuuu#TWWX r}u H)E8Fkk+ɜv&C[xfd ZlۦT*I,tQ IDAT)T˅eY4SƦ&ٻ7ڲ,N?t^yCΛ;w.[l!ŻX^Ku=Ko]gpknd"E0{ UU;C$2ill̘1chKAGEEE!6n|۷Ck[+e9sشi'NË'/#<ƣ=NmM5|5'Nq~mg9gO5_]Z@8m޼ϣ H[cm쯷 g kL^^XnU{<f̘A q_`c !JM rȵxPEaZr-i*Cv Nf6x,F< ljŢqc-lۡ9'Ns9XzQs=&_#"2uC0mmfsb= wއH$M;N-̘147'(*,5DQu\n6eee!O^b444K<%H xW0M~XCzF9*墸x,E6KybL>x*E:rr/r 3},<04M{ePmIAr) _G]|8xQqFN\uU<ý~cK.!Nsi8t2I<4df瓝SK5Ur8f&m<:u#_ǎ;6`;E:J'ط?gH8̚5˗8ӟ4H{キ:,ϟO4@X[E[[;Aֽ|^,>e9sxMcS+dP$䉥]HKsy{O5{ mav>Gyy9Dt:M0d2?q?_|D=b 'cp+m hkZ)(ximmCRU6y\ضM"HL$H={=q,_c̙… {}N_mYVc<g|\R)a\w?"wZp:W}fylfYm[v@XC:NJEc1eTL 0:6'8X/rz衮׿r|,_3A}] (uM7ݡ !F7u.imnyN,XuW\_ϙgf mql˶q{X0Oo2lҖqx'4k>3cƌcG&'ggaڴiCS! 
qgi&vڅ駟&++s2yd-ZUĵZDNb=qcg:}n ͜%~ir_L$l] gKo8psXn C<u}P2u7oQZZ5J:nXwp{包zݔq姷5,źcw&NUNKK b-8Cuu5cK4i u}h|_)1GZz; tRjjj馛'>A"z9br2r}Fv;,٧O{=>G)LpIV?nΟAQ!Ϻ5CQxU[&t_!{nN$$>R dt*eB!ʏrvg8e<뮻'x?:vd5x rPaReYC;{)..>1ˇy<n-[Ƃ ewH&VrVXqH"_`0ȅ+!M۶I&DQbd:ri&nGVVYYY:j6m!TVV2qD`1Twa%JH$HR]k;ϻ{gܹ}>t:ͺuYBvݗt.5q~I<=}e{vy7"^Xhfyv[hM0$ T>+}墩|#Cv:%fbܸqCZpiN#`[իWo^yxuziz}#O4M|>>e;OtM]o˜>}R=H=Q ,OXx(Zk84ي&"*֒a;}9ű(s3tي&"*֒AGrTcYdhmKsMDTeKmu6[DDDDTRyTc$HPTT7""""*/ŴiXncǎ,z9ܑ?:luuu,[8""""b}뜜֮]Ksss˙y>v24Yl&M" )aXEyy9SN=N:lqH$*""""*֣C,#iCGLmkkkQQQ}NXWUU)qʕ+ٲeˠ.="""""Jpkuꪫer*Up1VZXxKA݈^Y zj-~m*U(WQ6ꗂTTTPYY h)`/VTT; гh#c,GNlR\EUHr*VZDDDDDZdd4r*UHhJr*VZDDDDDZdd4r*UHhJr*VZDDDDDZdd4r*UHhJr*VZDDDDDZdd4r*UpqTQQT(WQlURE[DDDDȨi**U([k 4r*UȠi**U([k 4r*UȠi**U([k 4r*U.V=Od9˗ۇM|w]W)%""""*ֽj?]vl9m>$i**U(] ʔj?rgٕGw˰4r*U.IT__$AHfx#kԅ1\_MuzOw[dtN<=XƪU+V}^ks.*5n䉯~[wxMp! q70?𛨨*V\y0 5~8 """'-[ 2WaKb_0gbO<㗟`>(""""#1Z aݿ?-^lƭ^7R*U(rl - -`EDDDD2@ZF5MS)WQ\E٪Xd^\Ereb-""""b-22hJr*VZ$4M\Ereb-""""b-22hJr*VZ$4M\Ere;\ܣUTT(Uvnueee9iJr*VZ$4M\Ereb-""""b-22hJr*VZ$4M\Ereb-""""b-22hJr*VZ$4M\Ereb-""""b-22hJr*VZ$4M\Ereb-""""b=***z(WQlj*gŊVTTPYY @UU+WV=Od9˗pq7\x V\|5_-%i**U(_ S,;n&qWW/w]O~ȭp (WQlG6L{*SzḀ{r!^8sMCS\ZDDDDT+j{,(Ԃp0MS)WQ\E٪XVsQob׿pO=)7OwN?0 |@QxZãUW]uBv***zȉMTUD4^Eh{@m9qiJƫ(ᢥ """""*"4rWQ*"i**"lUEDDDDTEFMS)Wxeb-R"*VZDDDDDZdd4rWQ*"i**"lUEDDDDTEFMS)Wxeb-R"*vGPC0 m>8 """XWVVp ~)xe;h)wB.*VZDDDDDZDdd'E4^E٪XdE4^E٪XX ~xeb-"~xeb-""""b-"22hQDUHhQDUȠGWQ#{= *2iQDU0Ta "rƫ([k ƫ([kkAӏ"lUED2@ӏ"lUEDDDDTEDFM?hUM?hUdoK|K$Տ?xC|ϰ?DDDDDź[ 2uϾ⼞w!]~xW>~ҝqGWQ*֝ ʔg4_9cr]:On$(,GWQ*ևuox;8q[khX~z}Br O?}=:-G:#t VZXؼF_wJM//]oo>Ze}b^67G{= Ν*>.*v$Ҏ`k9iQDUHhQDUȠGWQ*""GWQ*"""""*""#E4^E٪XdE4^EَD*** !c8UVVGӏ"lGox[xن&nB9On'Imc0M?h:^œe/cn٧{F#I,blIgP?S"Y~xe;eËW]ݤN 7Bt-;YFƫ([]ֻV̯E&V]6՜F/cG1S9,ǜ%&6 
ƫ(ۑX|:Ln{W,цƫ(WsEDDD䄧4㎦E4^EَDJs'Qˏ<3눤_grͿ^L}QD2Hӏ"lG챶?\͛/"{B1/j{Y.mdQ>2T8o~7?v#?/$,"GWQ#Q9ym|㚥*"2d4(*vkPysj~xe;zu`+_OkH8ڨ""""b=87O.9=ϢlHYD2Kӏ"lG,9[_xi#HfiQDU-֘x|j""""rq.X)~87/%ڇyuEEE}4(*v$d|/LOu'cl=C.~xe;efub/%\8Y(5[EDDDDxKblg[uM?ܐ,""""Qrgf3s7;~6n~Gs6dE4^EَDGEӅ+g_J^3opiyhQDUh-XV:x&X@nyz~{>0ES[w~&""""X7rc x;w-^ 26hqHRxK tDDU(-esOhkO_Ǎ7\̟.ϗȨrWQ#Qfj."D(nz͜#돧+wX2 C?Hz}iiӃoժUΊ+ZB|g#>;xwΑ_UlMy5=×y{ REETUUr^ [N/DQfI{6f˖-ի3$n}uC0vU4W2aW:k*eY*"""je{"QAL<>_fڵ;~Y@\OpOah-֡{|-&.IDATݟ /%:{&=+`)Fi|_^pDDDwZ lUTR~ni+5EDD/e{"ADDDDDTEDDd$amHhU>Rgͮ ?/bk .naݚM[MuuF*"""',-P';*H|>q})_{ݿ%CEDDNDZ.lUŜkoOGu)Z-""""'|^peTN]p.}^lw|SII -R̫TP( (T.'?8pȐe@EP/-(7A([UVZ(i )$xI9\V/Ƹzל<7AONi[h W^ >좊v;& [uaeк6] %zivt]T1=!AuQ[œ;)/9OgyXddM\S{ɞ)Z˛6PV~ TߟE[1.@BQ.Eq!%+TU9#ŕwXM:>ƔjdKpȖb ̣ڸv屠 [uaeqa [uaStZ)eQ@Fi;ӕWWu^4yNRYddM\s{4qv]x+Ȗb]hѠ@."SwOڜZڇO7QOn&n&..Xl"Lcb}Hs}jcsH5kfs5('p8(z|x1vPò]zh+pK;YdP 7\5ڟ{ Rw;YGkq&E= S'+ó,{jx+Ȗb]hV=0Bmou,^/>RoQ))I =)ߥ\[0.@ܣioVqeٸE귦kO& b\l)օLQbн,+$ɪѺ7(Q}X5'Ů<\I:E~XddMuzsZz 7ಿ>^h,4ވqXT=~{>PȇX Xzs??]Îĸzu5XX@O>n i?5T"^qXWk{W\@"[oS7УGiX_5+ni;Uݺ/M.?96DlkFAzjsNi7c\l)օvź(ҷhXaڒީasݝ~P&+S~a=yxA򯫧FG/͂8SyZ|j ):fb}_Ф%*– y!+<ŋl)օe)*w_N%3SZB;EQ!"*ܛ8a9q4/'q}zgw=(-ٝ*dK.߶Zxl?3u:5M=W1]kGnW|S{zɸ^mS]v0bbb̨݋*neBB`-]9O,Qg71j,)}1S &OSJC.!I:t$):: al}p&D! ?٦޽{ }x6|U|.^][$[]jU68\잍:he a2z}x1uk[jij\GTUƏ꭮ ERdK.br.- pT3W! 
cmQphY-[VeCu~z^MSwv" [oRXYJ6VߛUwWV5g z -ź~Ҍ޽4eM}Js:'IMAQPm$;Av3 nM/Gϳx+֛mu}mq&X7ƸR\qX([I{BvTpqqq$[r8mŋl 3Ak`\l)(=0.@k`\l)(=0.@k`\l)(=0.@k`\l<N `4MX_ c&z FA@x-Ak`\l)(=0.@k`\l)(=0.@k`\l)(=0.@VҞn'UdK` QPl [5 0.@kk [5 0.@kk [;0S-ڴH ĸRo|VIwV)vb}mi}ӫX -43S'ϫM` -Ƶji??FZ[|/w[O`+\uO'4bbb̨(7ߩ_fHfRgVVz)=v9IҡC$IW|(IL,^RZݻзZTt-~iVip8Q?%XޥK95O+pKP5 k kb (.(X=0 gv;3FAX;6;FAPhPP5 k kb (.(X=0 B 0 B<NI\\{Kjv8nRgkb `b PXXkFA(pFA(Q5\Q?'dIr8mb^?`FXk B 0 B(pPP)ֹ7ijCT;E=QOHSMpxHQJu:Wh҉) ReU*m Jj$\'I^X_3}>,YuH^nQ?>ЖF>XTn[d:B)ڨV!}(omxiui/5Qix[W!Wr\A׋ۍ'FAWJ(HqT8xUTؼ^n} ,,Pt6@W= J&A.ŲO~ kCuS?֘qړ&V~@ ;Cyqj8%PVIF>]5+km:gUjȋ)ƲD0Bۣiʂz8O$m>\S'ia]]~KDjo{7iƨZ'QYF"w^Zek"i%5C֐z5vwJ5٨Ԡ* 8` 2ى:AH5nر^xcN5%(X+(JUܓ֫uNXTB/f>*[R$}`ybv]ݪ3I}dEFQ k Ζm 4Ncc3"T?qʚ=4ܘ(U(ﯜM6`FmX٩qL-+yPe-vЪ >L)J9S)7\1Ub :5ϡ]JQH`? utBe*+ʲyzJr<%J873G+j({ ~OTϕw0Ռ-jlfO||7kuŚݳ P5@(u64_3Z9aoTIyZЭ4~֟52J]f-r>J+U’gcOJCtVdffn<9gyڐx h_վ*UbUvOՙ<P}q}].'R$YAO_i\B619=WfH=1jg赓wDKke~$eGkǽ1%TY _ JAn*?4zT?%{_zWC:P/`=p:tJDziAjfUnW0}5Sxg:;^ѹ7]QoLe\;Զ#|@x!f (}XArӾ~YFV b)Ŏo~9c{%cSf4*t=iԦcN6a5\sN֩'3s~J2jꋊ,ZQ%8!MY5kjЊ4K%)GIęhsj!X2%)]{|ѪZZf?_!}kگ6YeOO_>f 5M谲s[tVYEkitIVUޤߙƨsU,᧞3uI3㏋o+^=g_7 K;sg֭|WVקǜJf>___}#dHRj7ӡ/(J7{ZD=/YLm@GI/GXѴ>k$ɽYgr z濝 ԧz zJKvuv(vc:;I>ӯh̯t4&kvnUbN+U.{S2=IUrU!(G@ոU_3+KR vV]ShPm5Y.79J>rN7ݞ债Զ Kyտ=B}۩~E[tgy8T@faGZT{)>"e$PZ.kk4V~!a4 4W&ωVߝ7Sd߾l;Sϸ:a_-JV_jj"M=cձ3yDgWUג~iX )Sf!Ðj̻p/e7&gfݰj\U6_ۅ,RLzJ֍a@=2a͝jJ3S>n ^寬~; t(mOPsΟw^MU2]Sgj]SU2+ja24[PGUl|W)I9:NP~-_O馔sl8la-[)鹅\_E4N(2+V]MzL Ƿh$9SJ׫@o?7{oSjX(M_ѧ4dj+KE u#xskXKuT[$^~s qU*秀S{2XcvsLyA#'ܦ(o զ_ԃKQdESԬ(*f?F]Bu:q" ^Su4Q Ij,5{p111fTT+suf*UMZ;gS*Ed6SnZl9?j#>>=|JEzbSffx]u\O!jѷ}LQ@,W)'yhUTc Q||<^\b P5@@(XXkb P&IP}91IENDB`spykeutils-0.4.3/doc/source/img/sde.png0000644000175000017500000013270212664623646016206 0ustar robrobPNG  IHDR2gAMA asRGB cHRMz&u0`:pQ<bKGDIDATxy|;!@HK B=QVlUj聵j[+=TVވhR@hp@{ٰY6&;;d3y {>@U . 
UU/Qe((B4?pz~#8K%_\v 7-˫|zC_~=K%}}w|&#պ@qwGtܐ>5llllllllltl6kXH;XDj//`vHNN!%%t,._/GG//L=J]]x<Cx?o?NM?$ӫ@FFFU+u+7}ΏW7Y]et͏K"!hjWs~ئvEw :G+Q/ξeX<L&S<6{'Ljjjsm墾o}Hjjjs=޷LGBBB˳Z@ !/))yy-?p!wnQ2qe} v}Z,^Qo[_Nkw60> ɵm#%$-.Ek?r}8ݎ"#[+:BQ| vҋzrŜ)% sB<*X=q6ְk rMN`.d?~(c=Pn\i(V{vr,e(c{]j8Z#?(feŤ:LjrK|dEQ4hIII III 4#G0dȐ #Gthy%%% >|#F48b`;v7? _[ {1L\dk_lr(J-v=pۊk^߆6O߱.|M&R'Mm^(ث+LT9X+ bo:'ǍZ_œgEf\=VhEQQDIgp9IqU냼^~:cHouݎDUUj|Ζ-oPzBG7gME՛\'f3~P7}mqpmrKUUU`2hhhhB{ѣG2d6 x<,Meggwxyno*!-/0?߁&w@oo{?<}mk[i6zZ?O?ul[o->m;%ttGMddחn$4 s@J_%:5-wbA1P_U29­*vvmBUUzǬӇcUU1:O7S[&8twb S~ԡݽxbW$3K80x[U\Oӱvr{K51tLN.\Ʈr,P_o}F @(f_|6}S@U~g%`v>̄g3uhwn7[ Lɟ>Ib\x cMxcK &EaՒg* =[;\~e=}Ϋ-Χwb`TՃiY=FE.a^?( /sl=2?[ϼMEגme;ב>l_mYz {3=Qݕ|*-k$?t|/>9^Cnpq`F|cLT<;{Gpho)6 s𬑤/Ŀz_ov[;3UUIIInw@kBZ|<'G[4?P_~`Pc?>u yk0x$V۟,10 iVZm.;/4(܂H2%q2as;0eާ/QWVubABMA|>睔m)ow3k>۫><)n:v~ѧ^ݱxTu?f懳'p5loo4Xϖt2AЉl,xjCӒ[{? ؇bk0JƬd1w N;f ;6}1)K}?T2FaGjQL 5ǎPјCQ=.TZg*269З^fF21c3_~-#N%mu&&3q@٣ݞ.)fPHY c#UŚч z U۷_Uũ( ˛y3xRΗ0z㱓()9J:Dk6gLiǖԛ>I Ew;} w^(mQ;̝řóp cgÁ=+ؽ=z(.2y͕ Iut/w$)) ]`=(JoWʿ=0؁(ا9_{?= Lg'St}k#}ޛF6o/ PQuM1 T ٴ;$\njU[AQs8}ǢMmx!\Ȳׯ>ԣҒC^X=<*`wcsITTX}GcVwӂ65RWYჅ퍔;F]]]C.g\7 ߬{/48uBYc?U5%Gm"=M=n5&?v(#qќTP[? gC9啕TVsx}Ň2;g< *d?#5T}T;3w`Rܪ Jv(huJj7zX MR;oedX v{Ѓ1c{`neI `W0)U.2s` $##9i<&\pގ3[&PL~Lv$1).j*86ՙ?vFUEQ$pH ((&^Lh N{=w~A/M}5)Wr_/D-^0!!s!l6YYYyV* @BBB'(Z!-/ߙ'%&&S?),' ǿ~'p9N`/|ol <Ķx$[`zA>`'n[<=(|V+b.&5!LbB" ޽{ӧOrZ.T/ۓ? 
ۊ|*Rg]Sf2C]Ϻ-Syvf¥}3IxXo/m2[@QS9몋m>}oU;rzx0Y| 0U =ޛoTN< n|ki?gsޜ*̠z/f}nC>Mț49ljE0&W1\vDU79O&Q'Kqj2;?zc4f\#l%_/nbYSxS/"F]l fSSOTS;M Ab"`bVUD|uHDlv~77gr ??^y}V&}Oiw7٢Lfƞ5-ޘ6_<,9/XpkbrQZZO;Rn$''3 (\%P:tH t [cndVlY--ӿfrt:7mF~fXAt\8?؉|` 6#ߎl:{'M{쓫?>0{lz<ቿ6/\:b{~(&<:_Ycݲ^0+އ5M'09];m֦`o>/\%H_)**Re~25Dڷ`iOؖȷN%^?0<ZT[NO4 |Or4o?>3!S;@:߶}l}?> $ыرc|8p H#ơRl#翱⯣_/+PBlhhmg$ӏK!_7xo`h vrOfN[i?U/KV nU9~7No/G#ʑ#G` WP_ V6\# JkYN\^`??ƨW(N-`&Mk'M_ߖ>YJ%_/KۏeA~j <쓉o?I;H"A, >;?^ߵ(6ۆ[&0l_/K%Ŀ+eee+B!'0u}B!B$B!B@h!B!B B!!Z!BH-B!D$B!"@ !BIB!$B!B@h!B!B B!!Z!BH-B!D$B!"@ !BIB!$B!B@h!B!B` {6 !BѬ< {]UU}!BrPEQnJP*KK!B1/\\^3#xG:ujolld25'zxvz8wG#G0eB!OG hq\8nJP%ǐB!tM8_j]. r D{ǓІE9^aٴ&ъ5k0h ue&ˁnW骪*9s;J!q%)))˓hq:x<.Ũʲ=NS!!BZp}`1\ta Z7'f<r-vrt;V!//}2c 79$ QY_{Sg[]Qv=D\[eQ1o\[PT⪼aǸG|˞$Z-*suڵuV\.wsi]]<bb8qMvN/Z .=n\@m閑Jbb$Ѿl6SZVzoxj%!!y>}-'ؽ%*+m$%%2u"p8p`p㩳~#oRi&Nw?w'6>oɱμEM|YXI~(/)am'k?>=dQf9XwG+xh50h܅L<obh3Tښ}qwgyFw=C{RCΝ{$&&b6ql6\m~; `܋;g"uQLUUyWY&MBR $L&3bFGGφQQ8B}rqF( }ӧO/ңpx6x쳦'`2p9tpzp9R]y EQouۿ/~{mPK8;ƨ#c$&&RZZݻ߿? ''wINJQm7R_g :e <Ϩ㩳b56ʡy["Pt'KhD8p: Á $$$v{0y\ 8\֘M|zΜɱGepc#&b!I/0eJTЭ_~Iϙ3ú z86 |y$$w ppݔWV$&Q[[GEE94]M^Xzv {99JjJ JK2e ݺutL 8#ŘLW^(!zO|>njjkyu,y9n2窫úHOyfjjj_,{Mhzd&pvmT\B8.TUۤԣ؝n2ؾap֍vaYHSG-lڂaCU=L6M}SYY{n}sMT/_R\b {Sr%/zeҫIaNS'N$iΜnalL}P=se>Ͽ6mcStD\.l6'ݲP=*5i~P^QA=&&R]]z):hFrr2Ʉfk.0) 8Nf3.Dᅬ#~@RJu+Y(cz_ y̻0tPwɓزeKkfMôXxVhrp$vr6CՂڨpa<.ʙNtVZyKܭvuޒR#X{230ǹi* 4SdԐp`f .@i\wشKN9U?XV23R򐜜FMuw 9t*IH-=5=7O>$ov=d2HRb iii$$$`;h.[ n;s?GQcŭe5l0s)χJaaaXYZaʲjѴ:qi-ҺSq[9kX8M5nnDNϓsp"##]v7l޽ƌagKLM#pM/UE1_~ѣHxvIOO'qhRF歷͜j@TV̳fQ^^N(⨩&Oh&z@;].R하 )85-53rہDTj]_&{dnQD<|y_kpʄ q Ommsj? ^ۇz%jp>ꊄr,f3dTUj.*v:0Pg Gz J?ŵcF?=yޫtX߀lpPj٭`Bn&6͘Cw[&u(!]&о'ϾDfk?f۶LrJwZF :RRRq=X)`ty0FVVV/&K.妛njs 4|QwNee%_~%&cǎq1&tgrĉy?k(>|_x5keJQQ˓O>ŁNͦ]gF_G~vw|k!:J 4x kxil+nZ8r޹g}}]a6ihv;dvKmNXSSDٽvjbmPʐ!CyQU%Kp 7`vx)!XV*3|0]njjkl*g'`ME~j₂'mWbzpg2Mq8춦/;NC!ׯfGUAݭ|/\1rvt29|SRǃ^Uy v?MUI\qǃ%iș7ƥ'n IcC#21dPLJSrx'yBTzmȌ(g&0PǃZ_k"݋HkC4w̙iʅMypؾ}7?ѹOgSHK>1LdbꩩbM\.;&N? 
^ˤ'HyJ28v:-FeJ ps=Rq{fNnQ]UE]]=5vZawh0qz<=, IuXGۍۃmG=UW]ȑ#^G㣏kcO=EFbEQPTSy9e@o~7T*eQ[؂"uQLSyk/HQ>ڰ fL?w.|@8jjPF ͡åPq8g?YMYmN>#Gp]wq 7`C^v8W>/CYyq)Lx%))>ۭby{ /Κ:s/ /BQn=1cƴNh+R;:+۷7 ׯ׶nŽꝉXQPOEHrf%ddz.bРAWPWWGMJ;6i΁ǭXä,r:@qƙwӧӣG6>9}GWx^'`vqd2С޲BUU8%Py%/֖HBt..<q\V3b1SrS'OshÂx<8蕕Acc#8TWWb!33zvFWj@t:ۉyéSw߱fnPc(-''7g=1|e I|M$yY; ]v1v~yj Ga\ DT;.Ccm-4(sej>L@1IŸsv{(vq\Rԗ;$;0NFdrn34,n I&b{пIKM!))Y1W^$''vZۜm^.@i)='qX,oĉ4w\|1{IѼBRVV#ѣi%/ʅѷO젎Quxƾ^\CY߿ :ݻ+EQX,!ocee%{媫 ~1LͷOТjSݪEŗDڵ+*kxꌏ>KgGew'& G".rZ %, I^z=WǜI >abkFrA ;X=>}hކݻ98%ȾFxGjZͳ:NTKP$;H 8D| ž@Du{WX!B/$#/]w]X,氎ef,C:$!qBp|ZohŎ;HIIjmL ?^#B85|./=%%%Q"4Z7M 1$B wh!B!~B!B$B!"@ !BIB!$B!B@h!B!B B!!Z!BH-B!D>VvܩuB!L>.SzB!D\>}zD7=>vԊ++n21I܍InLwcCܕ2UH}B!IJ8B!А!+Vh1I܍InLwcCc#++2@J8D4>&׺B!ppCB$#2qms<~IA|Z7ASO @iMe7aoMb`9җƥ-,CDƤUܣh=zh eqì!h@jojmNkJo%rݘ$y6&= 0ęÿiIBÓQ8Dlsn&s#'qmJuGrݘ***nЀSpdee3|Ω:Iwڭ_/BjЁ×,KAzk|D !"%"t0yJܭsBoМ$qo5WrJV\Ͷ.,k'wvX8"TQQJٸv'o6aMu6 64/y[}Ps6p\)vJ+k--bKuRvnFa֔lcj-+,mo衄 <'|ΖS;['I]nL5]lGN*,\Ըw\С8¶Et>4p9(ZSGO/xrT;[/RP[S.ccm-$жBۃ~ol|_˟~yܜ|._eOau͋Ⱦe%[$ƥl]ۼ?z{V x=(HExDz3 FS/}9d_! 3fwV3g]ϟ7eT\N~nyy,X ;?/|n͵m7/ʚHl#??7擛{3;26gM!*๛Q3s|߭.W摗KVRĬ5\; m|1r离ʰ;vɲm:^.?{wqqY$f.|%K7Lb[~` p l/9wͧpօg8*z5lg;/+{5"e|ͻw3‚;q|rYN3D-b\C3$O/$ZrĽXw׵̝;VgRL^: @ _4)\grwپ\ \SX۸M_o3w,~yC>5~eƼi-!/)oÚ{9ch&EO[v%1qܐ?3m˸c$4Et]e߱gkei|} ~=phaաV$3vVɭ4r',Ix%ݬC%1%HumY͌{C,>T6B|ދc_bqA:0{,z|Ͼ$/ZvhaӁ$&9Ҧb\sݓ@FgK @l{_Pw\ffٵ0;7Y?RM>(HdFH=Ζ=dObCu˸}]Ԏ3fNh.O%%Ї ON-XDDo%Z$t7\F0MbSKZ-}9sF +V:,㪔rQ u\jywlbdG[ p|k1ǜv|J^͏y ?FPn^NQpljmawc6\6^us}UMӽν֓VB#< o)vxwX:{vP5 rYޠ8G&X,K>&#;`o`iu58MAO7F3w^./=Ԃ8}\>繬|>c6%v蝛K/ڞݹc[{oy+x|䵬7|0klv MN)c{{ɝsE̹>$i߇oq[31?-^&_Mr8vT[OS?`ιRYc 1?~_Yʥyp_> -eѢE,\cǎ0x޷b Ǝ׸oy<*Fq"۹̦tN8!}i%=\|X?^͈!'1u} 8W5=3ϛ—+W0 nLFȄaØ1L9+̔M`^se2kdR233(0={ S/ bL^%>Ƃ^EN|"~vݬ =7DmE7aFX3?#Sn 'xi&^?ijo c+xEQ۷w,ecmiemqd{PSvӘFMT°w@5`%1M3 }s9/\EJO=9W,N,q=yo".Xղ(R>:eG+/~'rXԾySnz @%O]{ WZ%2xne4>;W_[M~9oaո?γ'=׶t[EpϧHd0V?M]<Υٜ|x^zcWH^T `ySd6G͋Ydi\;oOU`ѥ6Feܵ& Rv;Z29I)0g1߽`Z~3{Oy0W_A\D2C !h1)$ dWBĪJ E;o%xz =!%˩dϐ3a!agIL:s$MUI5%o65|u; ۙp6-lr4qn_X2zF2帰e0<Ŷ*w$;g2.x! 
?JMc38].$pHw a>>ks_"e5OJ2_0c)$9L2>}O|m}- >0{,z|Ͼ$/ZvP pA! ~1/[xbCԨ I/^I-|t7Y[6G } -be_}}a 粯j$$zSl+-nnyWrrX#o`ʷǗ1o_΅(Yn\ܥ#,b.",t_h}KzrG=}c8F0f>K7Q唉9:6r{Z,طʺB5-acܰp>3Xk/6 \?~ed=nx+_&im] $,II޽ lqRn_ށco?odv~&]VÒ7vRZ`oSVk[$ =kN+٬˸jFk>.}l8Qþ $`4s&iH`o#gҥ?KS'vs;ֽ7밭~p:ط"Sm0f,h{\tB./y.F)h-,Ϡ\T_?(op5Ojg ߸ y'D4C L""q)1C!Lk?`ba|Pι3'rlfcЯēdcWAazY$_IGo<)`PO7s=ԙ?aX[<ڇdOO|meYz}k'wd})Ȼ(frk.#(-X˳/EUz,6V݌`Tq!&獣bΔAyR^%7Ss5??) kxpiCj8d2E#MT$#3u7!bB%QԮ//lnA]_Z2FHc#++WG֞ ٸ]q]"q$(Lr+?iYIWdҕC GL=D8`Ghy:[:xp0.vZL]t]'|=Quh -6D=^!1-b $HuKcEQMuIInÄ[H^ ӽ:kݒq"qטoBK6V $8= ڋ">Eܭ]I\]L_:p+?(%L}@%= <8w, -zgyD^¸dZnENJ7|"2"G""d(In<y͟T\N~nyy,XL̝;\'//ܹ-Am咗GQJ%mn^ҍK'/7śc+nq WEެ`> +ll~nyy泺m8'Wb `?AB79|n6~Z,Kn^yS`\s6K6~Ȃ37?Eͥ.\YG^^. X陘мhs->Gnsdr7.&W%©7^aݎx|A2Vqi~>sXWw l<񻫸)b+w7~qӾ=)xs;tHr˫-`mKxB=[Nzd2{b7 $;g2.x! %ob_***auR}<丞")) Wr]s. %㻏ɭo`ʷǗw?}Z?ؾmsӳ߳dvJKR1 5n /?a$os6SFt1?^jIS.ƒ4da}em ˰L{Tg<yLM,dy!g22 o|Ì'Y%k,a%oVշ)kV^8׹v+f_Bq/ ҙ1nI~q<***46Dj<6J}hn^%_C>aL3x;Rr[w jg&/a;y`>z^I/s"s\YGҔe\W.* vʫ Il.l><~w# F;1p&ʺs9ron _#~lO&Qʁ $7 G]wWI]s5\4q(g||ck8Tk5zl?66./}/aM٠acذ}|=qhƏ1{\yp{_mݾ^'fTrRLc!|1y5+߄#(pg Y9eE.w/>"SOgtL?7 +Ԭ⋛hq)otphx_cFXs&w.kPΨb7ClxT\ǝSVW3\mWO{| my<68/8!3y…Ժh".\c} ͭxQs5$t6K z:UZ/SCzY]ow9[~ UKeb.7* ӽs;\M*q1pCʮD{iK8Z|H'On [~H-z&yQS tg顫_S>m㋭ݘ6Rܦ{g$f&Cc#Ar^#On orP2t "m!Mz-C$z׌7t LnLwc0-zp;#"On OR!:KF JJ8 $y]!h=tk:ݘ*Raqwazp3=/:B|9'EW ]‘Kc?N0C'B?-: RƿkݒS tgg"CW-@cƽ3ZnLqwv$nnqv]K G["R#>ȵ[K-Ct !b\HUr 0`ܻ*NF㐸S]ꔘ{$-zp#m#:C8Ў\E$Ȥ*ͤCc?.2!/qrgQ4KJ8TRBII %%eԹ".=tG/OƊ{$텎awcKstE!pX'ȓgjDRHKRC'An ]rΉHYK8,lFy=@Po|;7!'"yxPC-ZD'w E$Ob[Ihna|_<LZ^뎿z. 
a^u~#uӇYHtCo+OFt籠# q=r (u|-\sQg@np8n 4L-燙%GOkFs+4ɿҺ%qHnL1 y1w-٤*zL֌79,AD˅⨄C[q[!4;:$"Z`=1DGR AK܍).aSqZ _!Rd)rl R"%bcOJ8DK8B!)P&R񧇮Vuܣ%G㐸Sݹ쯂e- 1w[w)er]Do rN t4)ٖzO!"%Nz+B4;zdZ]a'Oq+.m1X-q7o]L].%JnhcM0s3-% !D쐻B/b΢"CWXɅC.Z:h1>2hD>z!R䖺6,D9$`Ǥp!/{+Д5z$qt!h=t?wH]K1T3'q7&]]]]DS%f4\ A: CBNB vlŗ8B+["#AW1tgQb*,=tk+$^IZ$&M"FF㐸n.%w۸NJCc#F[Bk{j[.1-r W'ChMF|C]Y#C$z2)dqw912q{,kz0zp u.@4) Ǫp> !DlE^h"CWH]|J5swcUܥ#jtXcq!R |CBNʜD11k_ =BbBEE=3ꡫӤ|b:$ƤKGT&.w);V1 Om 2fXuCp zcB!!"CWȭ.ٸ J(q7&]ݹ쯂e-1 ]=-zp虌t NQ!Aĺ8.Ag bBk D @롫?d2FdEI܍I5[=$L"uZMzp  Οַ0_\HP떄K8 3 BKFL<<nTmTV`X\c%ƤiܥC3r.%z$E,Us+/E`uk8-3t aȭ@K#AV VH\g?4C @롫V`Tc.wc=Bt:Q.%z#oXh\Ec[J8>X^h!B"q4|ǦM;(Ev=z}#b&B'[1iwfkNwcC5K]%ks?vjFC\W\fF +1I܍IA:~@\q,k\4\Z ɭ@t|"[*X&k!"CJڤQmݯ?=Nނw6z̃D߽mKxXG^P_'Lby{䵼ƿC=O>#u$^7=!4Fp_W 592C[J܌!#x!Dz09΅QDYq2 GS¿^_Kɡ<N]ZuƷ$5[W|>ۢ&><I }c,a[W\pn䖟'k088]lɗK>mbݎ}=K|.Һ%By4xh !KB&a/83M]ʡʍ?iSFɳj$[twQwcJ嚭;rGfC#2 G ;rc$D)>%2gr|x#Ǵ0}&6Fh?rޛ9BJEZ|4yE3>t"D5[]Xu+t#" ߙtB,YyڿY}Fꡫ_D]cI7y{LnLwcG&UC^ϧqk0f=6>G[!Bt"^ȱ,DLQ8.=.~,56- ,;@(uK뚜Q'oB_x>W/Ͽnkù{Dx'qX'w 8Nž@a_ Vj6l@Y>6m=FꡫfDndu!q7&1I5kz0z{G8e^/Ҧb!$8)ܱ Y;?G\L"Or i\ x y-qIyfo;dWB2^1zM(\|ᴦ0dh Zbn;Jф`= ; y6[l+ vth DD5]{xz+V\jK7q7ދc}T>m%ǡ&alDOv/mKn=l1(_ӃC#1-¨bB%MѺV^v`r-%wBg@?nIH4c߾RERZGŹzk0XĤ`Ǽ&šLwߞ]3!q4F 3 Ӡ!J˻t$p`/˶i#(&{A(EqQzr;&qI˯ a.8TX8o`ηO}X'zE;NV4o>fLh=="%UߪPqmd,}K~YY]_)BH O`ZbM^XV"5(b%çP'y}X#CeQ?$ aLrk5{? 
Rr{@j!^dJ8NAr2A롫_D]g4fqoc`> ҋ,q3iZ  kHipJ6?>S2;5~f1S!Or O/ jgYĦp% },RCtݗpXswS83.D{2u%-R "DIE,h~8|-pXzsUfVcl;noWWu}eee-:JӮ~֌nėD@X}mJ1vGVǡzoBr|CƤGdpV_x%>[k;&J8b)U!"NoDGƠKlz/p0ꇗsޡzU<f4v8{o[ !?aBN1F޲H-"pd#_9:L>9FC5[<"?}B#ؤ$"&|5[J>N yE,/=="%\uUԑDZZRػu_CO U:7+ǐYƹB؝1C8ʇNHt_QeO .fמd dޓ!䶔cpqmc7yǾ(R!"-a7v7Tų_/Շ#X!zVY~}Csz#¨J1VH1 H-qwh==%K2Vxo뺄#L":rO&*dO"|b } EbTl; #>==-JXrwF!ɴ(Sp~u+^Ycy۸7i8& nWu]_vVVVzW/:[<"_Ɨwu'btq1u)V#rD4jC#> G$鶄#ne퍊a_ gB;12fzHϴ`=/Ժ%A鿄CH0`rRFkxFzHhM,Q&T&]RzG!+h #JI܍I:,H'UCܥ#܌$_UlrlI?ZaTr:)>:.a=g:(Gb4iH3- ]kvxH0 )'=х&#i"%Jw7dž-RnL\/c2/u'|Syaz%6JʰK%*4@IDAT{gݕpօs8u =LF"Њ{?δCvOjT]obܔp~wYݱ%Ν塯] 2A-r͎kPӿJGqh@jj0gJ6hCVXW\ Z:݉Jáۚ%0Kyw0LEXwfkgN5듣ޯOޟLC9U!@!wWzO'pPf"l>a}4UĖ/C~O4ZC}{WM+L[{Wk=lcW^7%0s|]^|,Y9p<_gz{K\?lf׮]'4+"-+XL~ge @LVVr{5($ǛR(C2_ᮁ֬{vSz>d2#XO$$.5dlF"t.l}km |ס(Lk7 k^xepϮcR蘿hy;~2y$!瑈9bL{t 7pԟ75 Iأ!Bt#z3˹B"[":*p%֧ڱ>n5L}ZS֭'˒(WF|Fϫ8]=94[ֳN :-S>A(} {W&YEa 'R:]pȭ@}H,%UsnN>#"ξu[~ou}~7E[9m]X\㋔wε)KW9f8[{;{ꕞAW^IWP~%VVޱp-q tD9|IspA+Vp2V:C W>!+9N 7deZ]nhr^?zl?=T> Ei}YHqwLI:S%&OѴCO_YJ14VJi n8Btǽ_~K_4HVB<")%1@Ҵڹz䖇QGfh-Ah E$rKaHkGGst˓,# z %m WBe]'q7vS:a$NjϢV{%n[w;/gdbY:TZe^ LI2qB춒k8ҎHRҼ xa@w/X4wYGzu۫ FS>*XR R)?\ϥÏ$:$%acd5IzD?ot$:IKsIt4{Wys:Hp' sQ0:YZExw8R uw0$~46xoF sSz(| !{;BG& t`)RB2t<4t\jCK{ ,`=7kvX+Dzx[[th:PK8tph*zp+^fI;zq75;L*:| Y)袨p&tX߬W0Q9rD/6|$ҡxRZDrU26rtqZo!Dv'IsBɴ.+1@y^U Ӡ!-q\IDÊ+³ z0zsco|FN&a{g7#6E&~{6g6Xzw7FD2BNs+_d|׼^7`L|M# .9ü"[*]K GGο\C5&jDnËΐuP=R6 R¡R8^'ηպ5~Ͼu$BY Ood:<0#brIM׹+qگO8E5dgp RHpvnkwNӺI"@$zlWZ:Zz.%kddf=CnQI-:¹>K^ܖxHYGK.kݒ._եxz;FE[qoDCPq֞s]LGLR6zp'nJ %1/Eyrĝ,u7ZsTJ8DIm !MqY1gF6:N&ȊZ ؜<`/_> IxpkOD_sc-XE7꣈NQӼ_)xkiݢYp)vx5h?щ$qFFmkZbz58W]! 
t[b?k:Kl 5[ mDHQ>_Ý@K GkbIuq;J[/b4oĽߞݜ<} l6绯C1)]E3Fy8:/=Э'}FQlHrnB;3笌!|RI^gnLN!DG"6(OѦ@VtZ7 QHEOrA(hmғvF㐸s+c+g6[wĄ9üdHNjiGu^J8J*0oֺ9BOa1O|ab?K5"AxD% q71E*DhK5H LҡtDky[pk^*,zgg'@kQKξ[2{ 6& w?6ejݜ|}B.̷AL>'5i\wh?k~z?֐PDW1xJ.g=QP;'q7 l1!׽反v鯬7Jz}E+i` z(a({@:OrS5d$㑘 D,1*}0K=H#;w+e`j\~~6;zAY&ABL`\-Bg|H/\ND_a0a7v,H3˅q59J4&B*=Q`)@{kNnLz{D, ;8y GW#QɆ: ݒʌA7DmYsquZQz:@;a1, ,B?_oo;k lKe t+EHL%eee-:*hW$&O9P 205fg_3S,Yu+D$s͖ݘb=~{6d6ZuA!--=A[t, NFO2s\IW"TwąO}3 )3 <t["I&OqryݶKh-'] UbvkOE[3.y-}_ڥuDuNwc>-Őw?Ү1ZNt7nnpxJu<||9.q'#5)hfp0#l1y'GO>6xuBF3x[h-ƋXH.&'Hy dbh:=ѷb _xE:!1!׽反vi;RǜaK:4tEq7^ GW'OѲ _Ɇ."dĆ!0֤+a.Ǔ)v\نY1)f< c0[!ʹ쯂e-Bt1n+Dn=:/Nj36ww.F'Hyگx]\XC:+V+:ȵ/Pdĸf9Ca[yd9Z)C܍U[vpfo7tqG-u\KhO7hiG.5@J87tK$_UIx%hY_8ugs"mz9"1Ab2D86dxpGD=NXr'q7&;}њ way&-#ԧ9cl7غL"DT%v)B̗h6V$fg%Xυ ),RŸ#bl &y06)Bt23z`LʂWo ,C$E{*ېڧ[<"NL{:1[$q?3r !z1J8:gmh~FX"#9)",|BT~lBtWr0\ǥ3{xпYɳɆ$Bte_>"O7_o&cFL;>˅ZS8SYU0wwZxQ6$ynn;cwƿk4(9ߍJ1`Y fC܍Q:y}jBDftRlх/)(ېQ6TE:4cΰOeHu^!lnk3ɿeH8$1IC3rR4nӟIL}PrFtBD?έPRuYZ8]a+{u;~űdCa\]xxP>Ol~0jus6kN0D יK}l(&EJ6"DxD4$q?_]bRW?kၺv߮T G<d !4&臔[Q#z_HFBdR!hWG'ZK1@wAFو6]DW.eƙ|7&{tDu$z{LpD*Q&lCRơ=)"D5.E$BHMޙBt(_UdWr֛~C$ZuKpGD_.3j+ QIܣ/up>.i.%;l"hhGJhyQu|MW%evc>e$y"FÄBmEBP Ck~o{~KdE9^qgA{[osK>6z(ER? 3ßxy!{OR1 ݞu}/wf-#lS[! P9p\zf5lb3ŦKqJ}F$zDしn C-^ml AQw*_DmPw[D8l1b! -mGG|4z&6y>(Bw0Ih !f"/,u' 5Wyu'3lD.#pyo9hͰԏȣ ϼ퉺ۓNATBR #Aj,3zaо`$[4fXGQw{ {φ?=U т=Qw{2Cݣ*p8bJ#'fE0q7~. eQw{HّT ]Qw{2Cݣ*q1v`(Fy@a0 ^\7؜-h3,#=ERܽR`ӎE7Z~'nOf;vqm,b&]}HEm~Du.;r퉺ۓNBB*\8iY*-э~k BTE8<_e~Du'n(ɣ-xu'3ԝբV[b03+![4fXGQw{2VJ]qݞPw"j-#MF`3t"^K.yFB,6]J->=<%@וȱ/@06fXGQw{2UkL.b͙3ǐi8ݞLYhG/uGQw{ICmP[y)zzf=w@_woK3.acH#r6kٍT]]+K؉R44xM3gi֬YJiMDu'S݌MtSaC uB*if4zfp] 9K;Ynl>c<3-h3,#=EM s6*#=D8Hs`CD8J9N.yN. 
!U t0W"l KWuU|޷π#We(֩]몵wonR\ߧ_al->OYC۔jST>ʢ ׂo7vVCGz;W_/0,Z?ݧG vSÒ._@o=5EoݩzR[;?HCw [?}W/:z*%?7=!tKpתٙkNPwˋWF|mZ:w ߭/A#\%P]!n}:F0} [UZh۵[ުVX/-*9_=@iIF/Wt{J RCC?1s+jt9$unmPh<2M(t,@ejKU7Ī_>j8uJ\G2gګg(Ὺҽ.;#[= r($k<9s^Tc~P3=8(3OӕA|rjQvb@T` PjdhZfݑy\7[_/+QAT6]Cg?yJtB}Ni}]JN -?#6-~PInf[Cڣ37J'Zı }X2BlGyYǢRFCv~FZ7r-R2[q#4>IN?:]SIIpBLFQWe*IҀ֍ReTsTrҖ?!qjK<>V7=XI_mSqI՟0zh_D@ LF}]h[:wqCuڙ0]?7_̇UPXeo_%@t裏Awj :HR4dRt͠9Ѐ(@Lրꕷkź9|= ^gMWIzl}ܰ_~Z=4}.ּ| [ߚJeAjg\t<h d @A@  4h 4@h @A0M}-\P/ )gjڵr FTz;vL_}Μ9s$'?15:/ٳgu-hv6Ǝn-TSN&ҘIz $-]T^}u=%W\k3uUw%eJq:o*ɌC@766IMMMZlnv?~6n(IԩSy!I*~egdhւ>'Z~dۏ XZDojhhГO>Z]VNR߾}cZ$i ΓҢr(řM}ڔۤj[󲔿yz+s;ު"e嬓]y];3'_U xn^Ke 6+''_R޺Vo][ڻEYm",TiAܝ[S**@744IgϞSO=>}ԩSzǴcٳGӟTWSS=O whs&(ge\u I*V՜תxvҊ{G-[ݦE#T\_+ք}+t۰ur˩aW2$wP۰Sa]@R@{1Z)]][5zJF?K(*mPk~^fT윯k߮9`UT7#&a􀶯sT~o}…ʚQs_gk皹y*Z9Ktqۥg@׫AOKj`3<#IuwjhhP}}}'OU~=|* 5zZ7HҴՕNgKQڭ5.]!FoDxA|/[ubGg]G][I4A&;_خC>ڭ -cH3N*V/>N~iە&{vLdgh)VQNeڥi2<] 5ָqpw8z'.wNѬG6}`dJfunR' >IGRF^jr)*hk4`/OSrvjޒL'$=j豚O5~> ?豣56r)Mξ-mI)Mr񉭧>*>Рo$͜9S_~^{5-[L}6~E8ڊOI_kè9:vnCʁ3'%0gCh;M}t^\qstdR|5pPY$QS$IUK}ݓ{@)>7KsZs}YҜRl($Js8$%gw)Z8 K?̼j?M{wV}}&MA]>v٣8y))J) L@^OGg~>oLܲU,53z@W^= kk2YLLLYLLLq$}cϞ={I7=8DI޻w+/ x!IW%tEXtdate:create2013-02-21T14:58:43+01:00|%tEXtdate:modify2013-02-21T14:58:43+01:00YtEXttiff:alphaunassociated(tEXttiff:endianmsbTu}ttEXttiff:photometricRGB ItEXttiff:rows-per-strip45Lk"IENDB`spykeutils-0.4.3/doc/source/apidoc/0000755000175000017500000000000012664623646015403 5ustar robrobspykeutils-0.4.3/doc/source/apidoc/spykeutils.rst0000644000175000017500000000027612664623646020356 0ustar robrob.. _apiref: API reference ============= spykeutils package ------------------ .. automodule:: spykeutils Subpackages ----------- .. toctree:: spykeutils.plot spykeutils.plugin spykeutils-0.4.3/doc/source/apidoc/spykeutils.plugin.rst0000644000175000017500000000011712664623646021645 0ustar robrobplugin Package ============== .. 
automodule:: spykeutils.plugin :members: spykeutils-0.4.3/doc/source/apidoc/spykeutils.plot.rst0000644000175000017500000000014012664623646021321 0ustar robrobspykeutils.plot package ======================= .. automodule:: spykeutils.plot :members: spykeutils-0.4.3/doc/source/requirements.txt0000644000175000017500000000002312664623646017423 0ustar robrobquantities neo mockspykeutils-0.4.3/doc/source/examples.rst0000644000175000017500000001563712664623646016530 0ustar robrob.. _examples: Examples ======== These examples demonstrate the usage of some functions in spykeutils. This includes the creation of a small Neo object hierarchy with toy data. Creating the sample data ------------------------ The functions in spykeutils work on electrophysiological data that is represented in Neo object hierarchies. Usually, you would load these objects from a file, but for the purpose of this demonstration we will manually create an object hierarchy to illustrate their structure. Note that most functions in spykeutils will also work with separate Neo data objects that are not contained in a complete hierarchy. 
First, we import the modules we will use: >>> import quantities as pq >>> import neo >>> import scipy as sp >>> import spykeutils.spike_train_generation as stg We start with some container objects: two segments that represent trials and three units (representing neurons) that produced the spike trains: >>> segments = [neo.Segment('Trial 1'), neo.Segment('Trial 2')] >>> units = [] >>> units.append(neo.Unit('Regular intervals')) >>> units.append(neo.Unit('Homogeneous Poisson')) >>> units.append(neo.Unit('Modulated Poisson')) We create some spike trains from regular intervals, a homogeneous Poisson process and a modulated Poisson process: >>> trains = [] >>> trains.append(neo.SpikeTrain(sp.linspace(0, 10, 40) * pq.s, 10 * pq.s)) >>> trains.append(neo.SpikeTrain(sp.linspace(0, 10, 60) * pq.s, 10 * pq.s)) >>> trains.append(stg.gen_homogeneous_poisson(5 * pq.Hz, t_stop=10 * pq.s)) >>> trains.append(stg.gen_homogeneous_poisson(7 * pq.Hz, t_stop=10 * pq.s)) >>> modulation = lambda t: sp.sin(3 * sp.pi * t / 10.0 / pq.s) / 2.0 + 0.5 >>> trains.append(stg.gen_inhomogeneous_poisson(modulation, 10 * pq.Hz, t_stop=10*pq.s)) >>> trains.append(stg.gen_inhomogeneous_poisson(modulation, 10 * pq.Hz, t_stop=10*pq.s)) Next, we create analog signals using the spike trains. First, we convolve all spike times with a mock spike waveform. >>> spike = sp.sin(-sp.linspace(0, 2 * sp.pi, 16)) >>> binned_trains = (sp.histogram(trains[0], bins=160000, range=(0,10))[0] + ... sp.histogram(trains[2], bins=160000, range=(0,10))[0] + ... sp.histogram(trains[4], bins=160000, range=(0,10))[0]) >>> train_waves = [sp.convolve(binned_trains, spike)] >>> binned_trains = (sp.histogram(trains[1], bins=160000, range=(0,10))[0] + ... sp.histogram(trains[3], bins=160000, range=(0,10))[0] + ... sp.histogram(trains[5], bins=160000, range=(0,10))[0]) >>> train_waves.append(sp.convolve(binned_trains, spike)) Now we add Gaussian noise and create four signals in each segment: >>> for i in range(8): ... 
sig = train_waves[i%2] + 0.2 * sp.randn(train_waves[i%2].shape[0]) ... signal = neo.AnalogSignal(sig * pq.uV, sampling_rate=16 * pq.kHz) ... signal.segment = segments[i%2] ... segments[i%2].analogsignals.append(signal) Now we create the relationships between the spike trains and container objects. Each unit has two spike trains, one in each segment: >>> segments[0].spiketrains = [trains[0], trains[2], trains[4]] >>> segments[1].spiketrains = [trains[1], trains[3], trains[5]] >>> units[0].spiketrains = trains[:2] >>> units[1].spiketrains = trains[2:4] >>> units[2].spiketrains = trains[4:6] >>> for s in segments: ... for st in s.spiketrains: ... st.segment = s >>> for u in units: ... for st in u.spiketrains: ... st.unit = u Now that our sample data is ready, we will use some of the function from spykeutils to analyze it. PSTH and ISI ------------ To create a peri stimulus time histogram from our spike trains, we call :func:`spykeutils.rate_estimation.psth`. This function can create multiple PSTHs and takes a dicionary of lists of spike trains. Since our spike trains were generated by three units, we will create three histograms, one for each unit: >>> import spykeutils.rate_estimation >>> st_dict = {} >>> st_dict[units[0]] = units[0].spiketrains >>> st_dict[units[1]] = units[1].spiketrains >>> st_dict[units[2]] = units[2].spiketrains >>> spykeutils.rate_estimation.psth(st_dict, 400 * pq.ms)[0] # doctest: +ELLIPSIS {: array([ 6.25, 5. , 5. , 5. , 3.75, ... :func:`spykeutils.rate_estimation.psth` returns two values: A dictionary with the resulting histograms and a Quantity 1D with the bin edges. 
If :mod:`guiqwt` is installed, we can also use the :mod:`spykeutils.plot` package to create a PSTH plot from our data (in this case we want a bar histogram and therefore only use spike trains from one unit): >>> import spykeutils.plot >>> spykeutils.plot.psth({units[2]: units[2].spiketrains}, bin_size=400 * pq.ms, bar_plot=True) # doctest: +SKIP Similiarily, we can create an interspike interval histogram plot with: >>> spykeutils.plot.isi({units[2]: units[2].spiketrains}, bin_size=30 * pq.ms, cut_off=300 * pq.ms, bar_plot=True) This will open a plot window like the following: .. image:: /img/isi.png Spike Density Estimation ------------------------ Similar to a PSTH, a spike density estimation gives an esimate of the instantaneous firing rate. Instead of binning, it is based on a kernel convolution which results in a smoother estimate. Creating and SDE with spykeutils works very similar to creating a PSTH. Instead of manually choosing the size of the Gaussian kernel, :func:`spykeutils.rate_estimation.spike_density_estimation` also supports finding the optimal kernel size automatically for each unit: >>> kernel_sizes = sp.logspace(2, 3.3, 100) * pq.ms >>> spykeutils.rate_estimation.spike_density_estimation(st_dict, optimize_steps=kernel_sizes)[0] # doctest: +ELLIPSIS {: array([ ... As with the PSTH, there is also a plot function for creating a spike density estimation. Here, we use both units because the function produces a line plot where both units can be shown at the same time: >>> spykeutils.plot.sde(st_dict, maximum_kernel=3000*pq.ms, optimize_steps=100) # doctest: +SKIP The resulting plot will look like the following: .. image:: /img/sde.png While spike density estimations are preferable to PSTHs in many cases, the picture also shows an important weakness: The estimation will generally be too low on margins. 
The areas where this happens become larger with kernel size, which is clearly visible from the rounded shape of the purple and pink curves (which should be flat because of the constant rate of the spike trains) with their very large kernel size. Signal Plot ----------- As a final example, we will again use the :mod:`spykeutils.plot` package to create a plot of the signals we created. This plot will also display the spike times from one of our spike trains. >>> spykeutils.plot.signals(segments[0].analogsignals, spike_trains=[segments[0].spiketrains[2]], show_waveforms=False) # doctest: +SKIP .. image:: /img/signal.png The plot shows all four signals from the first segments as well as the spike times of the inhomogeneous poisson process in the same segment. spykeutils-0.4.3/doc/source/static/0000755000175000017500000000000012664623646015433 5ustar robrobspykeutils-0.4.3/doc/source/static/icon.ico0000644000175000017500000001246612664623646017070 0ustar robrob h& (  ```(PPP%%%vvv'___...jjjooo777BBBF%b^^ɟᕓkge(""-ccdPPPFPNNsʤɤΐ USQuӾӴblddմbͷ *t]\{eɾ{\\\̱sIDD1}{{ϣԁob^`PՆxIQlhDŸ̂ߖwYzcbXqe^R?WIxhXͰG3J-"&ss]].~zzEusssd==ff7%aaeQ|jjv ~~*1FcϏ w\\G$( @ ^^^TTT222 ccc]]]777 NNNllleee<<< ZZZ}}}uuummmCCC dddAAA|||wwwHHHoooHHH###NNNxxxRRR,,, A"1-+732:64300*'%LyyyZZZ555}NJIۑXTT.*(ʏfff555 UOMKڹ`\Z0+)vǐ鸸/ ~c`__`cqqr(w_̭dֵdۺd޿d޿dۺdַdέd_w( [ҿbβdغddgdddddۺḓdbƿICC$da`¦\aa`aiS䠆aaaaaaϰa\~zy000 !X[ZYXW`NpWWWWWWWаWXOKItqpҽlU^nًQNk[YIqbNNNNNNNЏgZ5n鏌111M˘sIfZpdbVXKvESDn_PBFEEFb]JZCY>Z9M&ԓC> -'su::F&%Fvv}==vv_6*$,,]::F!!zgS%A5/+a\Z10|m#(PPQppHH.#dd.T ֿ߯ Կ 꿪 ̲ [4//6z ]YYzZZQQ1)۶۶۶Ԫ& SSU,||uuZZxx4+#@@@##%`IBBIQ&&)Qiih``[[ԃ90'9~|JII@6,$6ԡ224vߨXX=,3/'Z⨧* jrrwi  Fffg ť\\]669u""%a##&_000pUUXлpmiKhhhnȱidd8??G?+?9?1?1???=??spykeutils-0.4.3/doc/source/static/copybutton.js0000644000175000017500000000463112664623646020203 0ustar robrob$(document).ready(function() { /* Add a [>>>] button on the top-right corner of code samples to hide * the >>> and ... 
prompts and the output and thus make the code * copyable. */ var div = $('.highlight-python .highlight,' + '.highlight-python3 .highlight') var pre = div.find('pre'); // get the styles from the current theme pre.parent().parent().css('position', 'relative'); var hide_text = 'Hide the prompts and output'; var show_text = 'Show the prompts and output'; var border_width = pre.css('border-top-width'); var border_style = pre.css('border-top-style'); var border_color = pre.css('border-top-color'); var button_styles = { 'cursor':'pointer', 'position': 'absolute', 'top': '0', 'right': '0', 'border-color': border_color, 'border-style': border_style, 'border-width': border_width, 'color': border_color, 'text-size': '75%', 'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em' } // create and add the button to all the code blocks that contain >>> div.each(function(index) { var jthis = $(this); if (jthis.find('.gp').length > 0) { var button = $('>>>'); button.css(button_styles) button.attr('title', hide_text); jthis.prepend(button); } // tracebacks (.gt) contain bare text elements that need to be // wrapped in a span to work with .nextUntil() (see later) jthis.find('pre:has(.gt)').contents().filter(function() { return ((this.nodeType == 3) && (this.data.trim().length > 0)); }).wrap(''); }); // define the behavior of the button when it's clicked $('.copybutton').toggle( function() { var button = $(this); button.parent().find('.go, .gp, .gt').hide(); button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden'); button.css('text-decoration', 'line-through'); button.attr('title', show_text); }, function() { var button = $(this); button.parent().find('.go, .gp, .gt').show(); button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible'); button.css('text-decoration', 'none'); button.attr('title', hide_text); }); }); spykeutils-0.4.3/doc/source/changelog.rst0000644000175000017500000000006512664623646016626 0ustar 
robrobChangelog ========= .. include:: ../../CHANGELOG.rstspykeutils-0.4.3/doc/source/conf.py0000644000175000017500000002207712664623646015453 0ustar robrob# -*- coding: utf-8 -*- # # spykeutils documentation build configuration file, created by # sphinx-quickstart on Mon Jul 30 16:54:09 2012. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath( os.path.join(os.pardir, os.pardir))) # -- Mocking modules for Read the Docs compatibility --------------------------- try: import neo import scipy import PyQt4 import guiqwt except ImportError: from mock import MagicMock MOCK_MODULES = ['tables', 'guiqwt', 'guiqwt.builder', 'guiqwt.baseplot', 'guiqwt.plot', 'guiqwt.curve', 'guiqwt.image', 'guiqwt.tools', 'guiqwt.signals', 'guiqwt.config', 'guiqwt.events', 'guiqwt.shapes', 'PyQt4', 'PyQt4.QtCore', 'PyQt4.QtGui', 'guidata', 'guidata.configtools', 'guidata.qthelpers'] for mod_name in MOCK_MODULES: sys.modules[mod_name] = MagicMock() # Needed for spykeutils.plot.Dialog.PlotDialog class QDialog: pass class PlotManager: pass sys.modules['PyQt4.QtGui'].QDialog = QDialog sys.modules['guiqwt.plot'].PlotManager = PlotManager # Needed for spykeutils.plot.guiqwt_tools class CommandTool: pass class InteractiveTool: pass sys.modules['guiqwt.tools'].CommandTool = CommandTool sys.modules['guiqwt.tools'].InteractiveTool = InteractiveTool import spykeutils # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal 
Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'spykeutils' copyright = u'2012, Robert Pröpper' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = spykeutils.__version__.rsplit('.', 1)[0] # The full version, including alpha/beta/rc tags. release = spykeutils.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). 
add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = 'icon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
#html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'spykeutilsdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'spykeutils.tex', u'spykeutils Documentation', u'Robert Pröpper', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. 
#latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Add additional features --------------------------------------------------- def setup(app): app.add_javascript('copybutton.js') # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'spykeutils', u'spykeutils Documentation', [u'Robert Pröpper'], 1) ] # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'python': ('http://docs.python.org/', None), 'neo': ('http://neo.readthedocs.org/en/latest/', None), 'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None), 'quantities': ('http://packages.python.org/quantities/', None), 'guiqwt': ('http://packages.python.org/guiqwt/', None), 'guidata': ('http://packages.python.org/guidata/', None), 'sklearn': ('http://scikit-learn.org/stable/', None)} spykeutils-0.4.3/doc/source/index.rst0000644000175000017500000000133112664623646016003 0ustar robrobWelcome to the documentation of spykeutils! =========================================== Based on the `Neo `_ framework, spykeutils is a Python library for analyzing and plotting neurophysiological data. It can be used by itself or in conjunction with `Spyke Viewer `_, a multi-platform GUI application for navigating electrophysiological datasets. A mailinglist for discussion and support is available at https://groups.google.com/d/forum/spyke-viewer Contents: .. 
toctree:: :maxdepth: 2 intro examples apidoc/spykeutils changelog acknowledgements Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` spykeutils-0.4.3/doc/source/acknowledgements.rst0000644000175000017500000000051612664623646020232 0ustar robrobAcknowledgements ================ spykeutils was created by Robert Pröpper [1]_, supported by the Research Training Group GRK 1589/1. Jan Gosmann [2]_ contributed and tested the spike train metric implementations. .. [1] Neural Information Processing Group, TU Berlin .. [2] Bernstein Center for Computational Neuroscience, Berlin spykeutils-0.4.3/doc/source/intro.rst0000644000175000017500000000505012664623646016031 0ustar robrobRequirements ============ Spykeutils is a pure Python package and therefore easy to install. It depends on the following additional packages: * Python_ >= 2.7 * neo_ >= 0.2.1 * scipy_ * guiqwt_ >= 2.1.4 (Optional, for plotting) * tables_ (Optional, for analysis results data management. Also known as PyTables.) * scikit-learn_ (Optional, for spike sorting quality analysis using Gaussian cluster overlap.) Please see the respective websites for instructions on how to install them if they are not present on your computer. If you use Linux, you might not have access rights to your Python package installation directory, depending on your configuration. In this case, you will have to execute all shell commands in this section with administrator privileges, e.g. by using ``sudo``. Download and Installation ========================= The easiest way to get spykeutils is from the Python Package Index. If you have pip_ installed:: $ pip install spykeutils Alternatively, if you have setuptools_:: $ easy_install spykeutils Users of NeuroDebian_ or its repositories (available for Debian and Ubuntu) can also install spykeutils using the package manager instead of pip_. 
The package is also available directly in recent Debian and Ubuntu installations, but might not be the most recent version. Install with:: $ sudo apt-get install python-spykeutils Alternatively, you can get the latest version directly from GitHub at https://github.com/rproepp/spykeutils. The master branch always contains the current stable version. If you want the latest development version, use the develop branch (selected by default). You can download the repository from the GitHub page or clone it using git and then install from the resulting folder:: $ python setup.py install Usage ===== For the most part, spykeutils is a collection of functions that work on Neo objects. Many functions also take quantities as parameters. Therefore, make sure to get an overview of :mod:`neo` and :mod:`quantities` before using spykeutils. Once you are familiar with these packages, have a look at the :ref:`examples` or head to the :ref:`apiref` to browse the contents of spykeutils. .. _`Python`: http://python.org .. _`neo`: http://neo.readthedocs.org .. _`guiqwt`: http://packages.python.org/guiqwt .. _`tables`: http://www.pytables.org .. _`quantities`: http://pypi.python.org/pypi/quantities .. _`scikit-learn`: http://scikit-learn.org .. _`pip`: http://pypi.python.org/pypi/pip .. _`scipy`: http://scipy.org .. _`setuptools`: http://pypi.python.org/pypi/setuptools .. _`NeuroDebian`: http://neuro.debian.net spykeutils-0.4.3/LICENSE0000644000175000017500000000274312664623646013112 0ustar robrobCopyright (c) 2012-2014, Robert Pröpper and conbtributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the author nor the names of the contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. spykeutils-0.4.3/CHANGELOG.rst0000644000175000017500000000415612664623646014126 0ustar robrob* Integration of pymuvr (when available) for faster calculation of van Rossum distance. Version 0.4.1 ------------- * Faster caching for Neo lazy loading. * Faster correlogram calculation. Version 0.4.0 ------------- * Correlogram plot supports new square plot matrix mode and count per second in addition to per segment. * New options in spike waveform plot. * DataProvider objects support transparent lazy loading for compatible IOs (currently only Hdf5IO). * DataProvider can be forced to use a certain IO instead of automatically determining it by file extension. * Load parameters for IOs can be specified in DataProvider. 
* IO class, IO parameters and IO plugins are saved in selections and properly used in startplugin.py * Qt implementation of ProgressBar available in plot.helper (moved from Spyke Viewer). * Loading support for IO plugins (moved from Spyke Viewer). Version 0.3.0 ------------- * Added implementations for various spike train metrics. * Added generation functions for poisson spike trains * Added tools module with various utility functions, e.g. binning spike trains or removing objects from Neo hierarchies. * Added explained variance function to spike sorting quality assessment. * Improved legends for plots involving colored lines. * Plots now have a minimum size and scroll bars appear if the plots would become too small. * Renamed plot.ISI to plot.isi for consistency Version 0.2.1 ------------- * Added "Home" and "Pan" tools for plots (useful when no middle mouse button is available). * Changed default grid in plots to show only major grid. * Added a method to DataProvider for refreshing views after object hierarchy changed. * New parameter for DataProvider AnalogSignal methods: AnalogSignalArrays can be automatically converted and returned. * Significantly improved speed of spike density estimation and optimal kernel size calculation. * Spike sorting quality assessment using gaussian clusters is now possible without prewhitening spikes or providing prewhitened means. * Renamed "spyke-plugin" script to "spykeplugin" Version 0.2.0 ------------- Initial documented public release. spykeutils-0.4.3/README.rst0000644000175000017500000000120112664623646013560 0ustar robrobOverview ======== Based on the `Neo `_ framework, spykeutils is a Python library for analyzing and plotting data from neurophysiological recordings. It can be used by itself or in conjunction with Spyke Viewer, a multi-platform GUI application for navigating electrophysiological datasets. 
For more information, see the documentation at http://spykeutils.readthedocs.org spykeutils was created by Robert Pröpper at the Neural Information Processing Group (TU Berlin), supported by the Research Training Group GRK 1589/1. Dependencies ============ * Python 2.7 * scipy * neo * Optional: guiqwt for plots spykeutils-0.4.3/setup.py0000644000175000017500000000371112664623646013613 0ustar robrob# -*- coding: utf-8 -*- from setuptools import setup, find_packages import os def find_version(): try: f = open(os.path.join('spykeutils', '__init__.py'), 'r') try: for line in f: if line.startswith('__version__'): rval = line.split()[-1][1:-1] break finally: f.close() except Exception: rval = '0' return rval DESC = """Based on the `Neo `_ framework, spykeutils is a Python library for analyzing and plotting data from neurophysiological recordings or simulations. It can be used by itself or in conjunction with Spyke Viewer, a multi-platform GUI application for navigating electrophysiological datasets. For more information, see the documentation at http://spykeutils.readthedocs.org""" if __name__ == "__main__": setup( name="spykeutils", version=find_version(), packages=find_packages(), install_requires=['scipy', 'quantities', 'neo>=0.2.1,<0.4'], extras_require={ 'plot': ['guiqwt>=2.1.4,<4.0'], 'plugin': ['tables'], 'quality_estimation': ['scikit-learn>=0.11'] }, entry_points={ 'console_scripts': ['spykeplugin = spykeutils.plugin.startplugin:main'] }, author='Robert Pröpper', maintainer='Robert Pröpper', description='Utilities for analyzing electrophysiological data', long_description=DESC, license='BSD', url='https://github.com/rproepp/spykeutils', classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Scientific/Engineering :: Bio-Informatics'])