diff --git a/.circleci/requirements_testing.txt b/.circleci/requirements_testing.txt
index 7ccb1e849..f859f6a6c 100644
--- a/.circleci/requirements_testing.txt
+++ b/.circleci/requirements_testing.txt
@@ -3,7 +3,7 @@ h5py
 igor
 klusta
 tqdm
-nixio>=1.4.3
+nixio>=1.5.0b2
 axographio>=0.3.1
 matplotlib
 ipython
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 804711d82..da461aa5b 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -28,7 +28,7 @@ Neo will still install but the IO module that uses them will fail on loading:
     * h5py >= 2.5 for Hdf5IO, KwikIO
     * klusta for KwikIO
     * igor >= 0.2 for IgorIO
-    * nixio >= 1.2 for NixIO
+    * nixio >= 1.5 for NixIO
     * stfio for StimfitIO

diff --git a/neo/core/basesignal.py b/neo/core/basesignal.py
index 6fcf5fceb..583491d9c 100644
--- a/neo/core/basesignal.py
+++ b/neo/core/basesignal.py
@@ -10,7 +10,7 @@ http://docs.scipy.org/doc/numpy/user/basics.subclassing.html

 In brief:
-* Constructor :meth:`__new__` for :class:`BaseSignal` doesn't exist.
+* Constructor :meth:`__new__` for :class:`BaseSignal` doesn't exist.
 Only child objects :class:`AnalogSignal` and :class:`IrregularlySampledSignal`
 can be created.
 '''
@@ -39,9 +39,9 @@ class BaseSignal(DataObject):
     This class contains all common methods of both child classes.
     It uses the following child class attributes:

-    :_necessary_attrs: a list of the attributes that the class must have.
+    :_necessary_attrs: a list of the attributes that the class must have.

-    :_recommended_attrs: a list of the attributes that the class may
+    :_recommended_attrs: a list of the attributes that the class may
     optionally have.
     '''

@@ -60,9 +60,9 @@ def __array_finalize__(self, obj):
         User-specified values are only relevant for construction from
         constructor, and these are set in __new__ in the child object.
-        Then they are just copied over here. Default values for the
+        Then they are just copied over here. Default values for the
         specific attributes for subclasses (:class:`AnalogSignal`
-        and :class:`IrregularlySampledSignal`) are set in
+        and :class:`IrregularlySampledSignal`) are set in
         :meth:`_array_finalize_spec`
         '''
         super(BaseSignal, self).__array_finalize__(obj)
@@ -90,7 +90,7 @@ def _rescale(self, signal, units=None):
         '''
         Check that units are present, and rescale the signal if necessary.
         This is called whenever a new signal is
-        created from the constructor. See :meth:`__new__' in
+        created from the constructor. See :meth:`__new__` in
         :class:`AnalogSignal` and :class:`IrregularlySampledSignal`
         '''
         if units is None:
@@ -183,8 +183,8 @@ def _copy_data_complement(self, other):
             setattr(self, attr[0], getattr(other, attr[0], None))
         setattr(self, 'annotations', getattr(other, 'annotations', None))

-        # Note: Array annotations cannot be copied because length of data can be changed
-        # here which would cause inconsistencies
+        # Note: Array annotations cannot be copied because the length of the data can be
+        # changed here, which would cause inconsistencies

     def __rsub__(self, other, *args):
         '''
@@ -264,16 +264,13 @@ def merge(self, other):
                 kwargs[name] = attr_self
             else:
                 kwargs[name] = "merge(%s, %s)" % (attr_self, attr_other)
-        merged_annotations = merge_annotations(self.annotations,
-                                               other.annotations)
+        merged_annotations = merge_annotations(self.annotations, other.annotations)
         kwargs.update(merged_annotations)

         kwargs['array_annotations'] = self._merge_array_annotations(other)

-        signal = self.__class__(stack, units=self.units, dtype=self.dtype,
-                                copy=False, t_start=self.t_start,
-                                sampling_rate=self.sampling_rate,
-                                **kwargs)
+        signal = self.__class__(stack, units=self.units, dtype=self.dtype, copy=False,
+                                t_start=self.t_start, sampling_rate=self.sampling_rate, **kwargs)
         signal.segment = self.segment

         if hasattr(self, "lazy_shape"):
@@ -281,12 +278,11 @@ def merge(self, other):

         # merge channel_index (move to ChannelIndex.merge()?)
         if self.channel_index and other.channel_index:
-            signal.channel_index = ChannelIndex(
-                index=np.arange(signal.shape[1]),
-                channel_ids=np.hstack([self.channel_index.channel_ids,
-                                       other.channel_index.channel_ids]),
-                channel_names=np.hstack([self.channel_index.channel_names,
-                                         other.channel_index.channel_names]))
+            signal.channel_index = ChannelIndex(
+                index=np.arange(signal.shape[1]),
+                channel_ids=np.hstack(
+                    [self.channel_index.channel_ids, other.channel_index.channel_ids]),
+                channel_names=np.hstack(
+                    [self.channel_index.channel_names, other.channel_index.channel_names]))
         else:
             signal.channel_index = ChannelIndex(index=np.arange(signal.shape[1]))
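Reviewer note: the reformatting of `merge` above is behaviour-preserving — two signals with matching units, `t_start` and `sampling_rate` are stacked column-wise and differing attributes are combined as `merge(a, b)` strings. A minimal sketch of that preserved behaviour, assuming this branch of Neo is installed (signal values and names are invented):

    import numpy as np
    import quantities as pq
    from neo.core import AnalogSignal

    # Two single-channel signals with identical sampling; names are made up
    a = AnalogSignal(np.arange(5.0).reshape(5, 1), units='mV',
                     sampling_rate=1 * pq.kHz, name='chan0')
    b = AnalogSignal(np.ones((5, 1)), units='mV',
                     sampling_rate=1 * pq.kHz, name='chan1')

    merged = a.merge(b)                          # column-stacks the data
    assert merged.shape == (5, 2)
    assert merged.name == 'merge(chan0, chan1)'  # differing attributes combined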
diff --git a/neo/core/dataobject.py b/neo/core/dataobject.py
index b31e49992..ae13e4a5d 100644
--- a/neo/core/dataobject.py
+++ b/neo/core/dataobject.py
@@ -12,16 +12,23 @@ import numpy as np

 from neo.core.baseneo import BaseNeo, _check_annotations

-# TODO: If yes, then should array annotations as a whole also be a property?
-

 def _normalize_array_annotations(value, length):
+    """Check consistency of array annotations
-    """
     Recursively check that value is either an array or list containing
     only "simple" types (number, string, date/time) or is a dict of those.
-    :return The array_annotations from value in correct form
-    :raises ValueError: In case value is not accepted as array_annotation(s)
+
+    Args:
+        :value: (np.ndarray, list or dict) value to be checked for consistency
+        :length: (int) required length of the array annotation
+
+    Returns:
+        np.ndarray The array_annotations from value in correct form
+
+    Raises:
+        ValueError: In case value is not accepted as array_annotation(s)
+    """

     # First stage, resolve dict of annotations into single annotations
@@ -33,16 +40,14 @@ def _normalize_array_annotations(value, length):
     elif value is None:
         raise ValueError("Array annotations must not be None")
-    # If not array annotation, pass on to regular check and make it a list,
-    # that is checked again
+    # If not an array annotation, pass on to the regular check and make it a list,
+    # which is then checked again
     # This covers array annotations with length 1
-    elif not isinstance(value, (list, np.ndarray)) or \
-            (isinstance(value, pq.Quantity) and value.shape == ()):
+    elif not isinstance(value, (list, np.ndarray)) or (
+            isinstance(value, pq.Quantity) and value.shape == ()):
         _check_annotations(value)
         value = _normalize_array_annotations(np.array([value]), length)

-    # If array annotation, check for correct length,
-    # only single dimension and allowed data
+    # If an array annotation, check for correct length, single dimension and allowed data
     else:
         # Get length that is required for array annotations, which is equal to the length
         # of the object's data
@@ -56,26 +61,26 @@ def _normalize_array_annotations(value, length):
                 value = np.ndarray((0,))
                 val_length = own_length
             else:
-                # Note: len(o) also works for np.ndarray, it then uses the outmost dimension,
+                # Note: len(o) also works for np.ndarray; it then uses the first dimension,
                 # which is exactly the desired behaviour here
                 val_length = len(value)

             if not own_length == val_length:
-                raise ValueError("Incorrect length of array annotation: {} != {}".
-                                 format(val_length, own_length))
+                raise ValueError(
+                    "Incorrect length of array annotation: {} != {}".format(val_length,
+                                                                            own_length))

         # Local function used to check single elements of a list or an array
         # They must not be lists or arrays and fit the usual annotation data types
         def _check_single_elem(element):
             # Nested array annotations not allowed currently
-            # So if an entry is a list or a np.ndarray, it's not allowed,
-            # except if it's a quantity of length 1
-            if isinstance(element, list) or \
-                    (isinstance(element, np.ndarray) and not
-                        (isinstance(element, pq.Quantity) and element.shape == ())):
+            # If an element is a list or a np.ndarray, it is not allowed,
+            # except if it is a quantity of length 1
+            if isinstance(element, list) or (isinstance(element, np.ndarray) and not (
+                    isinstance(element, pq.Quantity) and (
+                        element.shape == () or element.shape == (1,)))):
                 raise ValueError("Array annotations should only be 1-dimensional")
             if isinstance(element, dict):
-                raise ValueError("Dicts are not supported array annotations")
+                raise ValueError("Dictionaries are not supported as array annotations")

             # Perform regular check for elements of array or list
             _check_annotations(element)
@@ -86,19 +91,19 @@ def _check_single_elem(element):
             # Thus just performing a check on the first element is enough
             # Even if it's a pq.Quantity, which can be scalar or array, this is still true
             # Because a np.ndarray cannot contain scalars and sequences simultaneously
-            try:
+
+            # If the length of the data is 0, then nothing needs to be checked
+            if len(value):
                 # Perform check on first element
                 _check_single_elem(value[0])
-            except IndexError:
-                # If length of data is 0, then nothing needs to be checked
-                pass
+
             return value

         # In case of list, it needs to be ensured that all data are of the same type
         else:
-            # Conversion to numpy array makes all elements same type
             # Converts elements to most general type
+
             try:
                 value = np.array(value)
             # Except when scalar and non-scalar values are mixed, this causes conversion to fail
@@ -137,17 +142,25 @@ class DataObject(BaseNeo, pq.Quantity):
         - returning it as pq.Quantity or np.ndarray
         - handling of array_annotations

-    Array_annotations are a kind of annotations that contain metadata for every data point,
+    Array_annotations are a kind of annotation that contains metadata for every data point,
     i.e. per timestamp (in SpikeTrain, Event and Epoch) or signal channel (in AnalogSignal
     and IrregularlySampledSignal).
     They can contain the same data types as regular annotations, but are always represented
     as numpy arrays of the same length as the number of data points of the annotated neo object.
+
+    Args:
+        name (str, optional): Name of the Neo object
+        description (str, optional): Human readable string description of the Neo object
+        file_origin (str, optional): Origin of the data contained in this Neo object
+        array_annotations (dict, optional): Dictionary containing arrays / lists which annotate
+            individual data points of the Neo object.
+        kwargs: regular annotations stored in a separate annotation dictionary
     '''

     def __init__(self, name=None, description=None, file_origin=None, array_annotations=None,
                  **annotations):
         """
-        This method is called from each data object and initializes the newly created object by
+        This method is called by each data object and initializes the newly created object by
         adding array annotations and calling __init__ of the super class, where more
         annotations and attributes are processed.
""" @@ -157,13 +170,14 @@ def __init__(self, name=None, description=None, file_origin=None, array_annotati if array_annotations is not None: self.array_annotate(**array_annotations) - BaseNeo.__init__(self, name=name, description=description, - file_origin=file_origin, **annotations) + BaseNeo.__init__(self, name=name, description=description, file_origin=file_origin, + **annotations) def array_annotate(self, **array_annotations): """ - Add annotations (non-standardized metadata) as arrays to a Neo data object. + Add array annotations (annotations for individual data points) as arrays to a Neo data + object. Example: @@ -218,8 +232,6 @@ def _merge_array_annotations(self, other): :return Merged array_annotations ''' - # Make sure the user is notified for every object about which exact annotations are lost - warnings.simplefilter('always', UserWarning) merged_array_annotations = {} omitted_keys_self = [] # Concatenating arrays for each key @@ -234,7 +246,7 @@ def _merge_array_annotations(self, other): except ValueError: raise ValueError("Could not merge array annotations " "due to different units") - merged_array_annotations[key] = np.append(value, other_value)*value.units + merged_array_annotations[key] = np.append(value, other_value) * value.units else: merged_array_annotations[key] = np.append(value, other_value) @@ -243,17 +255,15 @@ def _merge_array_annotations(self, other): omitted_keys_self.append(key) continue # Also save omitted keys from 'other' - omitted_keys_other = [key for key in other.array_annotations - if key not in self.array_annotations] + omitted_keys_other = [key for key in other.array_annotations if + key not in self.array_annotations] + # Warn if keys were omitted if omitted_keys_other or omitted_keys_self: warnings.warn("The following array annotations were omitted, because they were only " "present in one of the merged objects: {} from the one that was merged " - "into and {} from the one that was merged into the other". - format(omitted_keys_self, omitted_keys_other), UserWarning) - - # Reset warning filter to default state - warnings.simplefilter("default") + "into and {} from the one that was merged into the other" + "".format(omitted_keys_self, omitted_keys_other), UserWarning) # Return the merged array_annotations return merged_array_annotations @@ -270,9 +280,7 @@ def rescale(self, units): return self.copy() # Rescale the object into a new object - # Works for all objects currently - obj = self.duplicate_with_new_data(signal=self.view(pq.Quantity).rescale(dim), - units=units) + obj = self.duplicate_with_new_data(signal=self.view(pq.Quantity).rescale(dim), units=units) # Expected behavior is deepcopy, so deepcopying array_annotations obj.array_annotations = copy.deepcopy(self.array_annotations) @@ -315,12 +323,11 @@ def _get_arr_ann_length(self): This is the last dimension of every object. 
         :return Required length of array annotations for this object
         """
-        # Number of items is last dimension in current objects
-        # This holds true for the current implementation
+        # The number of items is the last dimension of the data object
         # This method should be overridden in case this changes
         try:
             length = self.shape[-1]
-        # XXX This is because __getitem__[int] returns a scalar Epoch/Event/SpikeTrain
+        # Note: This is because __getitem__[int] returns a scalar Epoch/Event/SpikeTrain
         # To be removed if __getitem__[int] is changed
         except IndexError:
             length = 1
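Reviewer note: to make the array-annotation machinery above concrete, here is a small usage sketch, assuming this branch of Neo is installed (event times and annotation values are invented):

    import numpy as np
    import quantities as pq
    from neo.core import Event

    # Three events, each carrying one metadata value per data point
    ev = Event(times=np.array([1.0, 2.5, 10.0]) * pq.s,
               labels=np.array(['start', 'pause', 'stop'], dtype='S'))
    ev.array_annotate(trial=[1, 1, 2], quality=['good', 'good', 'noisy'])

    # Stored as numpy arrays with the same length as the number of data points
    print(ev.array_annotations['trial'])    # -> array([1, 1, 2])

    # A wrong length is rejected by _normalize_array_annotations
    try:
        ev.array_annotate(trial=[1, 2])
    except ValueError as err:
        print(err)  # Incorrect length of array annotation: 2 != 3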
diff --git a/neo/core/epoch.py b/neo/core/epoch.py
index b11fe27f9..8401bb0ed 100644
--- a/neo/core/epoch.py
+++ b/neo/core/epoch.py
@@ -22,18 +22,19 @@

 def _new_epoch(cls, times=None, durations=None, labels=None, units=None, name=None,
-               description=None, file_origin=None, array_annotations=None,
-               annotations=None, segment=None):
+               description=None, file_origin=None, array_annotations=None, annotations=None,
+               segment=None):
     '''
     A function to map epoch.__new__ to a function that does not do the unit checking.
     This is needed for pickle to work.
     '''
     e = Epoch(times=times, durations=durations, labels=labels, units=units, name=name,
               file_origin=file_origin, description=description,
-              array_annotations=array_annotations, **annotations)
+              array_annotations=array_annotations, **annotations)
     e.segment = segment
     return e

+
 class Epoch(DataObject):
     '''
     Array of epochs.
@@ -57,10 +58,10 @@ class Epoch(DataObject):
               dtype='|S4')

     *Required attributes/properties*:
-        :times: (quantity array 1D) The starts of the time periods.
-        :durations: (quantity array 1D) The length of the time period.
-        :labels: (numpy.array 1D dtype='S') Names or labels for the
-            time periods.
+        :times: (quantity array 1D) The start times of each time period.
+        :durations: (quantity array 1D or quantity scalar) The length(s) of each time period.
+            If a scalar, the same value is used for all time periods.
+        :labels: (numpy.array 1D dtype='S') Names or labels for the time periods.

     *Recommended attributes/properties*:
         :name: (str) A label for the dataset,
@@ -78,19 +79,24 @@ class Epoch(DataObject):
     _single_parent_objects = ('Segment',)

     _quantity_attr = 'times'
-    _necessary_attrs = (('times', pq.Quantity, 1),
-                        ('durations', pq.Quantity, 1),
+    _necessary_attrs = (('times', pq.Quantity, 1), ('durations', pq.Quantity, 1),
                         ('labels', np.ndarray, 1, np.dtype('S')))

-    def __new__(cls, times=None, durations=None, labels=None, units=None,
-                name=None, description=None, file_origin=None, array_annotations=None,
-                **annotations):
+    def __new__(cls, times=None, durations=None, labels=None, units=None, name=None,
+                description=None, file_origin=None, array_annotations=None, **annotations):
         if times is None:
             times = np.array([]) * pq.s
         if durations is None:
             durations = np.array([]) * pq.s
+        elif durations.size != times.size:
+            if durations.size == 1:
+                durations = durations * np.ones_like(times.magnitude)
+            else:
+                raise ValueError("Durations array has different length to times")
         if labels is None:
             labels = np.array([], dtype='S')
+        elif len(labels) != times.size:
+            raise ValueError("Labels array has different length to times")
         if units is None:
             # No keyword units, so get from `times`
             try:
@@ -106,10 +112,9 @@ def __new__(cls, times=None, durations=None, labels=None, units=None,
             # check to make sure the units are time
             # this approach is much faster than comparing the
             # reference dimensionality
-            if (len(dim) != 1 or list(dim.values())[0] != 1 or
-                    not isinstance(list(dim.keys())[0], pq.UnitTime)):
-                ValueError("Unit %s has dimensions %s, not [time]" %
-                           (units, dim.simplified))
+            if (len(dim) != 1 or list(dim.values())[0] != 1
+                    or not isinstance(list(dim.keys())[0], pq.UnitTime)):
+                raise ValueError("Unit %s has dimensions %s, not [time]" % (units, dim.simplified))

         obj = pq.Quantity.__new__(cls, times, units=dim)
         obj.labels = labels
@@ -117,15 +122,13 @@ def __new__(cls, times=None, durations=None, labels=None, units=None,
         obj.segment = None
         return obj
-    def __init__(self, times=None, durations=None, labels=None, units=None,
-                 name=None, description=None, file_origin=None, array_annotations=None,
-                 **annotations):
+    def __init__(self, times=None, durations=None, labels=None, units=None, name=None,
+                 description=None, file_origin=None, array_annotations=None, **annotations):
         '''
         Initialize a new :class:`Epoch` instance.
         '''
-        DataObject.__init__(self, name=name, file_origin=file_origin,
-                            description=description, array_annotations=array_annotations,
-                            **annotations)
+        DataObject.__init__(self, name=name, file_origin=file_origin, description=description,
+                            array_annotations=array_annotations, **annotations)

     def __reduce__(self):
         '''
@@ -159,8 +162,8 @@ def __repr__(self):
         else:
             labels = self.labels

-        objs = ['%s@%s for %s' % (label, time, dur) for
-                label, time, dur in zip(labels, self.times, self.durations)]
+        objs = ['%s@%s for %s' % (label, time, dur) for label, time, dur in
+                zip(labels, self.times, self.durations)]
         return '<Epoch: %s>' % ', '.join(objs)

     def _repr_pretty_(self, pp, cycle):
@@ -190,6 +193,14 @@ def __getitem__(self, i):
             pass
         return obj

+    def __getslice__(self, i, j):
+        '''
+        Get a slice from :attr:`i` to :attr:`j`.
+
+        Doesn't get called in Python 3; :meth:`__getitem__` is called instead.
+        '''
+        return self.__getitem__(slice(i, j))
+
     @property
     def times(self):
         return pq.Quantity(self)
@@ -215,8 +226,7 @@ def merge(self, other):
             else:
                 kwargs[name] = "merge(%s, %s)" % (attr_self, attr_other)

-        merged_annotations = merge_annotations(self.annotations,
-                                               other.annotations)
+        merged_annotations = merge_annotations(self.annotations, other.annotations)
         kwargs.update(merged_annotations)

         kwargs['array_annotations'] = self._merge_array_annotations(other)
@@ -232,15 +242,13 @@ def _copy_data_complement(self, other):
         '''
         # Note: Array annotations cannot be copied because length of data could be changed
         # here which would cause inconsistencies. This is instead done locally.
-        for attr in ("name", "file_origin",
-                     "description", "annotations"):
+        for attr in ("name", "file_origin", "description", "annotations"):
             setattr(self, attr, getattr(other, attr, None))

     def __deepcopy__(self, memo):
         cls = self.__class__
-        new_ep = cls(times=self.times, durations=self.durations,
-                     labels=self.labels, units=self.units,
-                     name=self.name, description=self.description,
+        new_ep = cls(times=self.times, durations=self.durations, labels=self.labels,
+                     units=self.units, name=self.name, description=self.description,
                      file_origin=self.file_origin)
         new_ep.__dict__.update(self.__dict__)
         memo[id(self)] = new_ep
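Reviewer note: the new `durations` broadcasting in `Epoch.__new__` above can be exercised like this — a minimal sketch assuming this branch is installed (times and labels are invented):

    import numpy as np
    import quantities as pq
    from neo.core import Epoch

    # A scalar duration is now broadcast to every epoch
    epc = Epoch(times=np.array([0., 10., 20.]) * pq.s,
                durations=5.0 * pq.s,
                labels=np.array(['a', 'b', 'c'], dtype='S'))
    print(epc.durations)  # -> array([5., 5., 5.]) * s

    # A mismatched non-scalar durations array now raises a ValueError
    try:
        Epoch(times=np.array([0., 10., 20.]) * pq.s,
              durations=np.array([1., 2.]) * pq.s)
    except ValueError as err:
        print(err)  # Durations array has different length to times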
diff --git a/neo/core/event.py b/neo/core/event.py
index c15e35b7b..0e58e1dbf 100644
--- a/neo/core/event.py
+++ b/neo/core/event.py
@@ -15,15 +15,15 @@
 import numpy as np
 import quantities as pq

-from neo.core.baseneo import BaseNeo, merge_annotations
+from neo.core.baseneo import merge_annotations
 from neo.core.dataobject import DataObject, ArrayDict
+from neo.core.epoch import Epoch

 PY_VER = sys.version_info[0]


-def _new_event(cls, times=None, labels=None, units=None, name=None,
-               file_origin=None, description=None, array_annotations=None,
-               annotations=None, segment=None):
+def _new_event(cls, times=None, labels=None, units=None, name=None, file_origin=None,
+               description=None, array_annotations=None, annotations=None, segment=None):
     '''
     A function to map Event.__new__ to a function that does not do the unit checking.
     This is needed for pickle to work.
     '''
@@ -74,8 +74,7 @@ class Event(DataObject):
     _single_parent_objects = ('Segment',)

     _quantity_attr = 'times'
-    _necessary_attrs = (('times', pq.Quantity, 1),
-                        ('labels', np.ndarray, 1, np.dtype('S')))
+    _necessary_attrs = (('times', pq.Quantity, 1), ('labels', np.ndarray, 1, np.dtype('S')))

     def __new__(cls, times=None, labels=None, units=None, name=None, description=None,
                 file_origin=None, array_annotations=None, **annotations):
@@ -98,10 +97,9 @@ def __new__(cls, times=None, labels=None, units=None, name=None, description=Non
             # check to make sure the units are time
             # this approach is much faster than comparing the
             # reference dimensionality
-            if (len(dim) != 1 or list(dim.values())[0] != 1 or
-                    not isinstance(list(dim.keys())[0], pq.UnitTime)):
-                ValueError("Unit %s has dimensions %s, not [time]" %
-                           (units, dim.simplified))
+            if (len(dim) != 1 or list(dim.values())[0] != 1
+                    or not isinstance(list(dim.keys())[0], pq.UnitTime)):
+                raise ValueError("Unit %s has dimensions %s, not [time]" % (units, dim.simplified))

         obj = pq.Quantity(times, units=dim).view(cls)
         obj.labels = labels
@@ -113,17 +111,16 @@ def __init__(self, times=None, labels=None, units=None, name=None, description=N
         '''
         Initialize a new :class:`Event` instance.
         '''
-        DataObject.__init__(self, name=name, file_origin=file_origin,
-                            description=description, array_annotations=array_annotations,
-                            **annotations)
+        DataObject.__init__(self, name=name, file_origin=file_origin, description=description,
+                            array_annotations=array_annotations, **annotations)

     def __reduce__(self):
         '''
         Map the __new__ function onto _new_event, so that pickle
         works
         '''
-        return _new_event, (self.__class__, np.array(self), self.labels, self.units,
-                            self.name, self.file_origin, self.description, self.array_annotations,
+        return _new_event, (self.__class__, np.array(self), self.labels, self.units, self.name,
+                            self.file_origin, self.description, self.array_annotations,
                             self.annotations, self.segment)

     def __array_finalize__(self, obj):
@@ -148,8 +145,7 @@ def __repr__(self):
             labels = self.labels.astype('U')
         else:
             labels = self.labels
-        objs = ['%s@%s' % (label, time) for label, time in zip(labels,
-                                                               self.times)]
+        objs = ['%s@%s' % (label, time) for label, time in zip(labels, self.times)]
         return '<Event: %s>' % ', '.join(objs)

     def _repr_pretty_(self, pp, cycle):
@@ -189,8 +185,8 @@ def merge(self, other):
             else:
                 kwargs[name] = "merge(%s, %s)" % (attr_self, attr_other)

-        merged_annotations = merge_annotations(self.annotations,
-                                               other.annotations)
+        merged_annotations = merge_annotations(self.annotations, other.annotations)

         kwargs.update(merged_annotations)

@@ -207,19 +203,16 @@ def _copy_data_complement(self, other):
         '''
         # Note: Array annotations cannot be copied
         # because they are linked to their respective timestamps
-        for attr in ("name", "file_origin", "description",
-                     "annotations"):
-            setattr(self, attr, getattr(other, attr, None))
-        # Note: Array annotations cannot be copied because length of data can be changed
-        # here which would cause inconsistencies
-        # This includes labels and durations!!!
+        for attr in ("name", "file_origin", "description", "annotations"):
+            setattr(self, attr, getattr(other, attr, None))
+        # Note: Array annotations cannot be copied because the length of the data can be
+        # changed here, which would cause inconsistencies. This includes labels and durations!
     def __deepcopy__(self, memo):
         cls = self.__class__
-        new_ev = cls(times=self.times,
-                     labels=self.labels, units=self.units,
-                     name=self.name, description=self.description,
-                     file_origin=self.file_origin)
+        new_ev = cls(times=self.times, labels=self.labels, units=self.units, name=self.name,
+                     description=self.description, file_origin=self.file_origin)
         new_ev.__dict__.update(self.__dict__)
         memo[id(self)] = new_ev
         for k, v in self.__dict__.items():
@@ -279,3 +272,54 @@ def get_labels(self):
             return self.array_annotations['labels']

     labels = property(get_labels, set_labels)
+
+    def to_epoch(self, pairwise=False, durations=None):
+        """
+        Returns a new Epoch object based on the times and labels in the Event object.
+
+        This method has three modes of action.
+
+        1. By default, an array of `n` event times will be transformed into
+           `n-1` epochs, where the end of one epoch is the beginning of the next.
+           This assumes that the events are ordered in time; it is the
+           responsibility of the caller to check that this is the case.
+        2. If `pairwise` is True, then the event times will be taken as pairs
+           representing the start and end time of an epoch. The number of
+           events must be even, otherwise a ValueError is raised.
+        3. If `durations` is given, it should be a scalar Quantity or a
+           Quantity array of the same size as the Event.
+           Each event time is then taken as the start of an epoch of duration
+           given by `durations`.
+
+        `pairwise=True` and `durations` are mutually exclusive. A ValueError
+        will be raised if both are given.
+
+        If `durations` is given, epoch labels are set to the corresponding
+        labels of the events that indicate the epoch start.
+        If `durations` is not given, then the event labels A and B bounding
+        the epoch are used to set the labels of the epochs in the form 'A-B'.
+        """
" + "Cannot give both `pairwise` and `durations`") + if self.size % 2 != 0: + raise ValueError("Pairwise conversion of events to epochs" + " requires an even number of events") + times = self.times[::2] + durations = self.times[1::2] - times + labels = np.array( + ["{}-{}".format(a, b) for a, b in zip(self.labels[::2], self.labels[1::2])]) + elif durations is None: + # Mode 1 + times = self.times[:-1] + durations = np.diff(self.times) + labels = np.array( + ["{}-{}".format(a, b) for a, b in zip(self.labels[:-1], self.labels[1:])]) + else: + # Mode 3 + times = self.times + labels = self.labels + return Epoch(times=times, durations=durations, labels=labels) diff --git a/neo/core/spiketrain.py b/neo/core/spiketrain.py index da521d932..cbd5fec14 100644 --- a/neo/core/spiketrain.py +++ b/neo/core/spiketrain.py @@ -39,10 +39,9 @@ def check_has_dimensions_time(*values): errmsgs = [] for value in values: dim = value.dimensionality - if (len(dim) != 1 or list(dim.values())[0] != 1 or - not isinstance(list(dim.keys())[0], pq.UnitTime)): - errmsgs.append("value %s has dimensions %s, not [time]" % - (value, dim.simplified)) + if (len(dim) != 1 or list(dim.values())[0] != 1 or not isinstance(list(dim.keys())[0], + pq.UnitTime)): + errmsgs.append("value %s has dimensions %s, not [time]" % (value, dim.simplified)) if errmsgs: raise ValueError("\n".join(errmsgs)) @@ -69,11 +68,9 @@ def _check_time_in_range(value, t_start, t_stop, view=False): t_stop = t_stop.view(np.ndarray) if value.min() < t_start: - raise ValueError("The first spike (%s) is before t_start (%s)" % - (value, t_start)) + raise ValueError("The first spike (%s) is before t_start (%s)" % (value, t_start)) if value.max() > t_stop: - raise ValueError("The last spike (%s) is after t_stop (%s)" % - (value, t_stop)) + raise ValueError("The last spike (%s) is after t_stop (%s)" % (value, t_stop)) def _check_waveform_dimensions(spiketrain): @@ -92,25 +89,21 @@ def _check_waveform_dimensions(spiketrain): if waveforms.shape[0] != len(spiketrain): raise ValueError("Spiketrain length (%s) does not match to number of " - "waveforms present (%s)" % (len(spiketrain), - waveforms.shape[0])) + "waveforms present (%s)" % (len(spiketrain), waveforms.shape[0])) -def _new_spiketrain(cls, signal, t_stop, units=None, dtype=None, - copy=True, sampling_rate=1.0 * pq.Hz, - t_start=0.0 * pq.s, waveforms=None, left_sweep=None, - name=None, file_origin=None, description=None, - array_annotations=None, annotations=None, - segment=None, unit=None): +def _new_spiketrain(cls, signal, t_stop, units=None, dtype=None, copy=True, + sampling_rate=1.0 * pq.Hz, t_start=0.0 * pq.s, waveforms=None, left_sweep=None, + name=None, file_origin=None, description=None, array_annotations=None, + annotations=None, segment=None, unit=None): ''' A function to map :meth:`BaseAnalogSignal.__new__` to function that does not do the unit checking. This is needed for :module:`pickle` to work. 
diff --git a/neo/core/spiketrain.py b/neo/core/spiketrain.py
index da521d932..cbd5fec14 100644
--- a/neo/core/spiketrain.py
+++ b/neo/core/spiketrain.py
@@ -39,10 +39,9 @@ def check_has_dimensions_time(*values):
     errmsgs = []
     for value in values:
         dim = value.dimensionality
-        if (len(dim) != 1 or list(dim.values())[0] != 1 or
-                not isinstance(list(dim.keys())[0], pq.UnitTime)):
-            errmsgs.append("value %s has dimensions %s, not [time]" %
-                           (value, dim.simplified))
+        if (len(dim) != 1 or list(dim.values())[0] != 1
+                or not isinstance(list(dim.keys())[0], pq.UnitTime)):
+            errmsgs.append("value %s has dimensions %s, not [time]" % (value, dim.simplified))
     if errmsgs:
         raise ValueError("\n".join(errmsgs))

@@ -69,11 +68,9 @@ def _check_time_in_range(value, t_start, t_stop, view=False):
         t_stop = t_stop.view(np.ndarray)

     if value.min() < t_start:
-        raise ValueError("The first spike (%s) is before t_start (%s)" %
-                         (value, t_start))
+        raise ValueError("The first spike (%s) is before t_start (%s)" % (value, t_start))
     if value.max() > t_stop:
-        raise ValueError("The last spike (%s) is after t_stop (%s)" %
-                         (value, t_stop))
+        raise ValueError("The last spike (%s) is after t_stop (%s)" % (value, t_stop))


 def _check_waveform_dimensions(spiketrain):
@@ -92,25 +89,21 @@ def _check_waveform_dimensions(spiketrain):

     if waveforms.shape[0] != len(spiketrain):
         raise ValueError("Spiketrain length (%s) does not match to number of "
-                         "waveforms present (%s)" % (len(spiketrain),
-                                                     waveforms.shape[0]))
+                         "waveforms present (%s)" % (len(spiketrain), waveforms.shape[0]))


-def _new_spiketrain(cls, signal, t_stop, units=None, dtype=None,
-                    copy=True, sampling_rate=1.0 * pq.Hz,
-                    t_start=0.0 * pq.s, waveforms=None, left_sweep=None,
-                    name=None, file_origin=None, description=None,
-                    array_annotations=None, annotations=None,
-                    segment=None, unit=None):
+def _new_spiketrain(cls, signal, t_stop, units=None, dtype=None, copy=True,
+                    sampling_rate=1.0 * pq.Hz, t_start=0.0 * pq.s, waveforms=None, left_sweep=None,
+                    name=None, file_origin=None, description=None, array_annotations=None,
+                    annotations=None, segment=None, unit=None):
     '''
     A function to map :meth:`BaseAnalogSignal.__new__` to a function that
     does not do the unit checking. This is needed for :mod:`pickle` to work.
     '''
     if annotations is None:
         annotations = {}
-    obj = SpikeTrain(signal, t_stop, units, dtype, copy, sampling_rate,
-                     t_start, waveforms, left_sweep, name, file_origin,
-                     description, array_annotations, **annotations)
+    obj = SpikeTrain(signal, t_stop, units, dtype, copy, sampling_rate, t_start, waveforms,
+                     left_sweep, name, file_origin, description, array_annotations, **annotations)
     obj.segment = segment
     obj.unit = unit
     return obj
@@ -207,29 +200,23 @@ class SpikeTrain(DataObject):
     _single_parent_objects = ('Segment', 'Unit')

     _quantity_attr = 'times'
-    _necessary_attrs = (('times', pq.Quantity, 1),
-                        ('t_start', pq.Quantity, 0),
+    _necessary_attrs = (('times', pq.Quantity, 1), ('t_start', pq.Quantity, 0),
                         ('t_stop', pq.Quantity, 0))
-    _recommended_attrs = ((('waveforms', pq.Quantity, 3),
-                           ('left_sweep', pq.Quantity, 0),
-                           ('sampling_rate', pq.Quantity, 0)) +
-                          BaseNeo._recommended_attrs)
+    _recommended_attrs = ((('waveforms', pq.Quantity, 3), ('left_sweep', pq.Quantity, 0),
+                           ('sampling_rate', pq.Quantity, 0)) + BaseNeo._recommended_attrs)

-    def __new__(cls, times, t_stop, units=None, dtype=None, copy=True,
-                sampling_rate=1.0 * pq.Hz, t_start=0.0 * pq.s, waveforms=None,
-                left_sweep=None, name=None, file_origin=None, description=None,
-                array_annotations=None, **annotations):
+    def __new__(cls, times, t_stop, units=None, dtype=None, copy=True, sampling_rate=1.0 * pq.Hz,
+                t_start=0.0 * pq.s, waveforms=None, left_sweep=None, name=None, file_origin=None,
+                description=None, array_annotations=None, **annotations):
         '''
         Constructs a new :class:`SpikeTrain` instance from data.
         This is called whenever a new :class:`SpikeTrain` is created from the
         constructor, but not when slicing.
         '''
-        if len(times) != 0 and waveforms is not None and len(times) != \
-                waveforms.shape[0]:
+        if len(times) != 0 and waveforms is not None and len(times) != waveforms.shape[0]:
             # len(times) != 0 has been used to work around a bug occurring during neo import
-            raise ValueError(
-                "the number of waveforms should be equal to the number of spikes")
+            raise ValueError("the number of waveforms should be equal to the number of spikes")

         # Make sure units are consistent
         # also get the dimensionality now since it is much faster to feed
@@ -278,8 +265,8 @@ def __new__(cls, times, t_stop, units=None, dtype=None, copy=True,
         # check to make sure the units are time
         # this approach is orders of magnitude faster than comparing the
         # reference dimensionality
-        if (len(dim) != 1 or list(dim.values())[0] != 1 or
-                not isinstance(list(dim.keys())[0], pq.UnitTime)):
+        if (len(dim) != 1 or list(dim.values())[0] != 1
+                or not isinstance(list(dim.keys())[0], pq.UnitTime)):
             raise ValueError("Unit has dimensions %s, not [time]" % dim.simplified)

         # Construct Quantity from data
@@ -288,16 +275,17 @@ def __new__(cls, times, t_stop, units=None, dtype=None, copy=True,
         # if the dtype and units match, just copy the values here instead
         # of doing the much more expensive creation of a new Quantity
         # using items() is orders of magnitude faster
-        if (hasattr(t_start, 'dtype') and t_start.dtype == obj.dtype and
-                hasattr(t_start, 'dimensionality') and
-                t_start.dimensionality.items() == dim.items()):
+        if (hasattr(t_start, 'dtype')
+                and t_start.dtype == obj.dtype
+                and hasattr(t_start, 'dimensionality')
+                and t_start.dimensionality.items() == dim.items()):
             obj.t_start = t_start.copy()
         else:
             obj.t_start = pq.Quantity(t_start, units=dim, dtype=obj.dtype)

-        if (hasattr(t_stop, 'dtype') and t_stop.dtype == obj.dtype and
-                hasattr(t_stop, 'dimensionality') and
-                t_stop.dimensionality.items() == dim.items()):
+        if (hasattr(t_stop, 'dtype') and t_stop.dtype == obj.dtype
+                and hasattr(t_stop, 'dimensionality')
+                and t_stop.dimensionality.items() == dim.items()):
             obj.t_stop = t_stop.copy()
         else:
             obj.t_stop = pq.Quantity(t_stop, units=dim, dtype=obj.dtype)
@@ -316,10 +304,10 @@ def __new__(cls, times, t_stop, units=None, dtype=None, copy=True,

         return obj

-    def __init__(self, times, t_stop, units=None, dtype=np.float,
-                 copy=True, sampling_rate=1.0 * pq.Hz, t_start=0.0 * pq.s,
-                 waveforms=None, left_sweep=None, name=None, file_origin=None,
-                 description=None, array_annotations=None, **annotations):
+    def __init__(self, times, t_stop, units=None, dtype=np.float, copy=True,
+                 sampling_rate=1.0 * pq.Hz, t_start=0.0 * pq.s, waveforms=None, left_sweep=None,
+                 name=None, file_origin=None, description=None, array_annotations=None,
+                 **annotations):
         '''
         Initializes a newly constructed :class:`SpikeTrain` instance.
         '''
@@ -330,9 +318,8 @@ def __init__(self, times, t_stop, units=None, dtype=np.float,
         # Calls parent __init__, which grabs universally recommended
         # attributes and sets up self.annotations
-        DataObject.__init__(self, name=name, file_origin=file_origin,
-                            description=description, array_annotations=array_annotations,
-                            **annotations)
+        DataObject.__init__(self, name=name, file_origin=file_origin, description=description,
+                            array_annotations=array_annotations, **annotations)

     def _repr_pretty_(self, pp, cycle):
         super(SpikeTrain, self)._repr_pretty_(pp, cycle)
@@ -352,12 +339,10 @@ def __reduce__(self):
         works
         '''
         import numpy
-        return _new_spiketrain, (self.__class__, numpy.array(self),
-                                 self.t_stop, self.units, self.dtype, True,
-                                 self.sampling_rate, self.t_start,
-                                 self.waveforms, self.left_sweep,
-                                 self.name, self.file_origin, self.description,
-                                 self.array_annotations, self.annotations,
+        return _new_spiketrain, (self.__class__, numpy.array(self), self.t_stop, self.units,
+                                 self.dtype, True, self.sampling_rate, self.t_start,
+                                 self.waveforms, self.left_sweep, self.name, self.file_origin,
+                                 self.description, self.array_annotations, self.annotations,
                                  self.segment, self.unit)

     def __array_finalize__(self, obj):
@@ -416,10 +401,9 @@ def __array_finalize__(self, obj):

     def __deepcopy__(self, memo):
         cls = self.__class__
-        new_st = cls(np.array(self), self.t_stop, units=self.units,
-                     dtype=self.dtype, copy=True, sampling_rate=self.sampling_rate,
-                     t_start=self.t_start, waveforms=self.waveforms,
-                     left_sweep=self.left_sweep, name=self.name,
+        new_st = cls(np.array(self), self.t_stop, units=self.units, dtype=self.dtype, copy=True,
+                     sampling_rate=self.sampling_rate, t_start=self.t_start,
+                     waveforms=self.waveforms, left_sweep=self.left_sweep, name=self.name,
                      file_origin=self.file_origin, description=self.description)
         new_st.__dict__.update(self.__dict__)
         memo[id(self)] = new_st
@@ -485,12 +469,10 @@ def __add__(self, time):
         else:
             t_start = self.t_start + time
             t_stop = self.t_stop + time
-        return SpikeTrain(times=new_times, t_stop=t_stop,
-                          units=self.units, sampling_rate=self.sampling_rate,
-                          t_start=t_start, waveforms=self.waveforms,
-                          left_sweep=self.left_sweep, name=self.name,
-                          file_origin=self.file_origin,
-                          description=self.description,
+        return SpikeTrain(times=new_times, t_stop=t_stop, units=self.units,
+                          sampling_rate=self.sampling_rate, t_start=t_start,
+                          waveforms=self.waveforms, left_sweep=self.left_sweep, name=self.name,
+                          file_origin=self.file_origin, description=self.description,
                           array_annotations=copy.deepcopy(self.array_annotations),
                          **self.annotations)

@@ -525,12 +507,10 @@ def __sub__(self, time):
         else:
             t_start = self.t_start - time
             t_stop = self.t_stop - time
-        return SpikeTrain(times=spikes - time, t_stop=t_stop,
-                          units=self.units, sampling_rate=self.sampling_rate,
-                          t_start=t_start, waveforms=self.waveforms,
-                          left_sweep=self.left_sweep, name=self.name,
-                          file_origin=self.file_origin,
-                          description=self.description,
+        return SpikeTrain(times=spikes - time, t_stop=t_stop, units=self.units,
+                          sampling_rate=self.sampling_rate, t_start=t_start,
+                          waveforms=self.waveforms, left_sweep=self.left_sweep, name=self.name,
+                          file_origin=self.file_origin, description=self.description,
                           array_annotations=copy.deepcopy(self.array_annotations),
                           **self.annotations)

@@ -552,9 +532,9 @@ def __setitem__(self, i, value):
         Set the value of the item or slice :attr:`i`.
         '''
         if not hasattr(value, "units"):
-            value = pq.Quantity(value, units=self.units)
-            # or should we be strict: raise ValueError("Setting a value
-            # requires a quantity")?
+            value = pq.Quantity(value, units=self.units)
+            # or should we be strict: raise ValueError("Setting a value requires a quantity")?
         # check for values outside t_start, t_stop
         _check_time_in_range(value, self.t_start, self.t_stop)
         super(SpikeTrain, self).__setitem__(i, value)
@@ -572,15 +552,15 @@ def _copy_data_complement(self, other, deep_copy=False):
         '''
         # Note: Array annotations cannot be copied because length of data can be changed
         # here which would cause inconsistencies
-        for attr in ("left_sweep", "sampling_rate", "name", "file_origin",
-                     "description", "annotations"):
+        for attr in ("left_sweep", "sampling_rate", "name", "file_origin", "description",
+                     "annotations"):
             attr_value = getattr(other, attr, None)
             if deep_copy:
                 attr_value = copy.deepcopy(attr_value)
             setattr(self, attr, attr_value)

-    def duplicate_with_new_data(self, signal, t_start=None, t_stop=None,
-                                waveforms=None, deep_copy=True, units=None):
+    def duplicate_with_new_data(self, signal, t_start=None, t_stop=None, waveforms=None,
+                                deep_copy=True, units=None):
         '''
         Create a new :class:`SpikeTrain` with the same metadata
         but different data (times, t_start, t_stop)
@@ -598,8 +578,8 @@ def duplicate_with_new_data(self, signal, t_start=None, t_stop=None,
         else:
             units = pq.quantity.validate_dimensionality(units)

-        new_st = self.__class__(signal, t_start=t_start, t_stop=t_stop,
-                                waveforms=waveforms, units=units)
+        new_st = self.__class__(signal, t_start=t_start, t_stop=t_stop, waveforms=waveforms,
+                                units=units)
         new_st._copy_data_complement(self, deep_copy=deep_copy)

         # Note: Array annotations are not copied here, because length of data could change
@@ -683,15 +663,12 @@ def merge(self, other):
                 kwargs[name] = attr_self
             else:
                 kwargs[name] = "merge(%s, %s)" % (attr_self, attr_other)
-        merged_annotations = merge_annotations(self.annotations,
-                                               other.annotations)
+        merged_annotations = merge_annotations(self.annotations, other.annotations)
         kwargs.update(merged_annotations)

-        train = SpikeTrain(stack, units=self.units, dtype=self.dtype,
-                           copy=False, t_start=self.t_start,
-                           t_stop=self.t_stop,
-                           sampling_rate=self.sampling_rate,
-                           left_sweep=self.left_sweep, **kwargs)
+        train = SpikeTrain(stack, units=self.units, dtype=self.dtype, copy=False,
+                           t_start=self.t_start, t_stop=self.t_stop,
+                           sampling_rate=self.sampling_rate, left_sweep=self.left_sweep, **kwargs)
         if all(wfs):
             wfs_stack = np.vstack((self.waveforms, other.waveforms))
             wfs_stack = wfs_stack[sorting]
@@ -717,9 +694,6 @@ def _merge_array_annotations(self, other, sorting=None):

         assert sorting is not None, "The order of the merged spikes must be known"

-        # Make sure the user is notified for every object about which exact annotations are lost
-        warnings.simplefilter('always', UserWarning)
-
         merged_array_annotations = {}
         omitted_keys_self = []

@@ -741,16 +715,14 @@ def _merge_array_annotations(self, other, sorting=None):
                 omitted_keys_self.append(key)
                 continue

-        omitted_keys_other = [key for key in other.array_annotations
-                              if key not in self.array_annotations]
+        omitted_keys_other = [key for key in other.array_annotations
+                              if key not in self.array_annotations]

         if omitted_keys_self or omitted_keys_other:
             warnings.warn("The following array annotations were omitted, because they were only "
                           "present in one of the merged objects: {} from the one that was merged "
-                          "into and {} from the one that was merged into the other".
-                          format(omitted_keys_self, omitted_keys_other), UserWarning)
-        # Reset warning filter to default state
-        warnings.simplefilter("default")
+                          "into and {} from the one that was merged into the other"
+                          "".format(omitted_keys_self, omitted_keys_other), UserWarning)

         return merged_array_annotations
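Reviewer note: the reformatted `SpikeTrain` arithmetic above leaves the time-shift semantics unchanged — a scalar shift moves the spikes and `t_start`/`t_stop` together. A minimal sketch assuming this branch (spike times are invented):

    import numpy as np
    import quantities as pq
    from neo.core import SpikeTrain

    st = SpikeTrain(np.array([0.1, 0.5, 1.2]) * pq.s, t_stop=2.0 * pq.s)

    # __add__ / __sub__ shift the spikes and, for scalar shifts, the bounds too
    shifted = st + 0.5 * pq.s
    print(shifted.t_stop)   # -> 2.5 s

    # Spikes outside [t_start, t_stop] are rejected by _check_time_in_range
    try:
        SpikeTrain(np.array([0.1, 3.0]) * pq.s, t_stop=2.0 * pq.s)
    except ValueError as err:
        print(err)          # The last spike (...) is after t_stop (...)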
diff --git a/neo/io/__init__.py b/neo/io/__init__.py
index 1f4b98fa6..5afd93b72 100644
--- a/neo/io/__init__.py
+++ b/neo/io/__init__.py
@@ -43,6 +43,8 @@

 .. autoclass:: neo.io.IgorIO

+.. autoclass:: neo.io.IntanIO
+
 .. autoclass:: neo.io.KlustaKwikIO

 .. autoclass:: neo.io.KwikIO
@@ -75,6 +77,8 @@

 .. autoclass:: neo.io.RawBinarySignalIO

+.. autoclass:: neo.io.RawMCSIO
+
 .. autoclass:: neo.io.StimfitIO

 .. autoclass:: neo.io.TdtIO
@@ -119,6 +123,7 @@
 # from neo.io.elphyio import ElphyIO
 from neo.io.exampleio import ExampleIO
 from neo.io.igorproio import IgorIO
+from neo.io.intanio import IntanIO
 from neo.io.klustakwikio import KlustaKwikIO
 from neo.io.kwikio import KwikIO
 from neo.io.micromedio import MicromedIO
@@ -136,6 +141,7 @@
 from neo.io.pickleio import PickleIO
 from neo.io.plexonio import PlexonIO
 from neo.io.rawbinarysignalio import RawBinarySignalIO
+from neo.io.rawmcsio import RawMCSIO
 from neo.io.spike2io import Spike2IO
 from neo.io.stimfitio import StimfitIO
 from neo.io.tdtio import TdtIO
@@ -158,6 +164,7 @@
     # ElphyIO,
     ExampleIO,
     IgorIO,
+    IntanIO,
     KlustaKwikIO,
     KwikIO,
     MicromedIO,
@@ -174,6 +181,7 @@
     PickleIO,
     PlexonIO,
     RawBinarySignalIO,
+    RawMCSIO,
     Spike2IO,
     StimfitIO,
     TdtIO,
diff --git a/neo/io/axonio.py b/neo/io/axonio.py
index f2b38aa4d..b375507e3 100644
--- a/neo/io/axonio.py
+++ b/neo/io/axonio.py
@@ -15,6 +15,28 @@ class AxonIO(AxonRawIO, BaseFromRaw):
         - abf = Axon binary file
         - atf is a text-based format from Axon that can be read by AsciiIO
           (but this format is less efficient.)
+
+    Here is an important note from erikli@github for users who want to reconstruct the
+    original stimulus waveform:
+    With Axon ABF2 files, the information that you need to recapitulate the original stimulus
+    waveform (both digital and analog) is contained in multiple places.
+
+    - `AxonIO._axon_info['protocol']` -- things like the number of samples in an episode
+    - `AxonIO._axon_info['section']['ADCSection']` | `AxonIO._axon_info['section']['DACSection']` -- the number of channels and the channel properties
+    - `AxonIO._axon_info['protocol']['nActiveDACChannel']` -- bitmask specifying which DACs are actually active
+    - `AxonIO._axon_info['protocol']['nDigitalEnable']` -- bitmask specifying which set of Epoch timings should be used to specify the duration of digital outputs
+    - `AxonIO._axon_info['dictEpochInfoPerDAC']` -- dict of dicts. The first index is the DAC channel and the second index is the Epoch number (i.e. information about Epoch A in Channel 2 would be in `AxonIO._axon_info['dictEpochInfoPerDAC'][2][0]`)
+    - `AxonIO._axon_info['EpochInfo']` -- list of dicts containing information about each Epoch's digital out pattern. Digital out is a bitmask with the least significant bit corresponding to Digital Out 0
+    - `AxonIO._axon_info['listDACInfo']` -- information about DAC name, scale factor, holding level, etc.
+    - `AxonIO._t_starts` -- start time of each sweep in a unified time basis
+    - `AxonIO._sampling_rate`
+
+    The current AxonIO.read_protocol() method utilizes only a subset of these. In particular,
+    it doesn't consider `nDigitalEnable`, `EpochInfo`, or `nActiveDACChannel`, and it doesn't
+    account for the Epoch types offered by Clampex/pClamp other than discrete steps (such as
+    ramp, pulse train, etc., encoded by `nEpochType` in the EpochInfoPerDAC section). I'm
+    currently parsing a superset of the properties used by read_protocol() in my analysis
+    scripts, but that code still doesn't parse the full information and isn't in a state where
+    it could be committed, and I can't currently prioritize putting together all the code that
+    would parse the full set of data. The `AxonIO._axon_info['EpochInfo']` section doesn't
+    currently exist.
     """
     _prefered_signal_group_mode = 'split-all'
diff --git a/neo/io/blackrockio_v4.py b/neo/io/blackrockio_v4.py
index 64331df42..c2fcd69e1 100644
--- a/neo/io/blackrockio_v4.py
+++ b/neo/io/blackrockio_v4.py
@@ -232,22 +232,22 @@ def __init__(self, filename, nsx_override=None, nev_override=None,
         self._filenames = {}
         if nsx_override:
             self._filenames['nsx'] = re.sub(
-                os.path.extsep + 'ns[1,2,3,4,5,6]$', '', nsx_override)
+                os.path.extsep + r'ns[1,2,3,4,5,6]$', '', nsx_override)
         else:
             self._filenames['nsx'] = self.filename
         if nev_override:
             self._filenames['nev'] = re.sub(
-                os.path.extsep + 'nev$', '', nev_override)
+                os.path.extsep + r'nev$', '', nev_override)
         else:
             self._filenames['nev'] = self.filename
         if sif_override:
             self._filenames['sif'] = re.sub(
-                os.path.extsep + 'sif$', '', sif_override)
+                os.path.extsep + r'sif$', '', sif_override)
         else:
             self._filenames['sif'] = self.filename
         if ccf_override:
             self._filenames['ccf'] = re.sub(
-                os.path.extsep + 'ccf$', '', ccf_override)
+                os.path.extsep + r'ccf$', '', ccf_override)
         else:
             self._filenames['ccf'] = self.filename
@@ -1267,7 +1267,7 @@ def __get_nsx_param_variant_a(self, param_name, nsx_nb):
         nsx_parameters = {
             'labels': labels,
             'units': np.array(
-                ['uV'] *
+                [b'uV'] *
                 self.__nsx_basic_header[nsx_nb]['channel_count']),
             'min_analog_val': -1 * np.array(dig_factor),
             'max_analog_val': np.array(dig_factor),
diff --git a/neo/io/elphyio.py b/neo/io/elphyio.py
index 9fa8bde55..ac4ad125c 100644
--- a/neo/io/elphyio.py
+++ b/neo/io/elphyio.py
@@ -2985,7 +2985,7 @@ class LayoutFactory(object):

     def __init__(self, elphy_file):
         self.elphy_file = elphy_file
-        self.pattern = "\d{4}(\d+|\D)\D"
+        self.pattern = r"\d{4}(\d+|\D)\D"
         self.block_subclasses = dict()

     @property
diff --git a/neo/io/intanio.py b/neo/io/intanio.py
new file mode 100644
index 000000000..3ec3a829a
--- /dev/null
+++ b/neo/io/intanio.py
@@ -0,0 +1,13 @@
+# -*- coding: utf-8 -*-
+
+from neo.io.basefromrawio import BaseFromRaw
+from neo.rawio.intanrawio import IntanRawIO
+
+
+class IntanIO(IntanRawIO, BaseFromRaw):
+    __doc__ = IntanRawIO.__doc__
+    _prefered_signal_group_mode = 'group-by-same-units'
+
+    def __init__(self, filename):
+        IntanRawIO.__init__(self, filename=filename)
+        BaseFromRaw.__init__(self, filename)
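Reviewer note: since the new `IntanIO` is just `IntanRawIO` wrapped with `BaseFromRaw`, it inherits the standard read API; a usage sketch (the filename is a placeholder, not a real file):

    from neo.io import IntanIO

    # Hypothetical path to an Intan recording
    reader = IntanIO(filename='recording.rhd')
    block = reader.read_block()  # grouping defaults to 'group-by-same-units'
    print(block.segments[0].analogsignals)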
diff --git a/neo/io/neomatlabio.py b/neo/io/neomatlabio.py
index f0606a21f..dc02e6415 100644
--- a/neo/io/neomatlabio.py
+++ b/neo/io/neomatlabio.py
@@ -385,7 +385,7 @@ def create_ob_from_struct(self, struct, classname):
             if attrname in dict_attributes:
                 attrtype = dict_attributes[attrname][0]
                 if attrtype == datetime:
-                    m = '(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+).(\d+)'
+                    m = r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+).(\d+)'
                     r = re.findall(m, str(item))
                     if len(r) == 1:
                         item = datetime(*[int(e) for e in r[0]])
diff --git a/neo/io/neuralynxio_v1.py b/neo/io/neuralynxio_v1.py
index b7da8c4eb..f1970a818 100644
--- a/neo/io/neuralynxio_v1.py
+++ b/neo/io/neuralynxio_v1.py
@@ -1901,9 +1901,9 @@ def __read_text_header(self, filename, parameter_dict):
             'Successfully decoded text header of file (%s).' % filename)

     def __get_cheetah_version_from_txt_header(self, text_header, filename):
-        version_regex = re.compile('((-CheetahRev )|'
-                                   '(ApplicationName Cheetah "))'
-                                   '(?P<version>\d{1,3}\.\d{1,3}\.\d{1,3})')
+        version_regex = re.compile(r'((-CheetahRev )|'
+                                   r'(ApplicationName Cheetah "))'
+                                   r'(?P<version>\d{1,3}\.\d{1,3}\.\d{1,3})')
         match = version_regex.search(text_header)
         if match:
             return match.groupdict()['version']
@@ -1913,27 +1913,27 @@ def __get_filename_and_times_from_txt_header(self, text_header, version):

         if parse_version(version) <= parse_version('5.6.4'):
-            datetime1_regex = re.compile('## Time Opened \(m/d/y\): '
-                                         '(?P<date>\S+)'
-                                         ' \(h:m:s\.ms\) '
-                                         '(?P