From 259648e32dec29018ce18b9960ce93db74355290 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Sat, 22 Sep 2018 19:38:58 +0200 Subject: [PATCH 01/41] [nixio] Read linked metadata properties into annotations --- neo/io/nixio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neo/io/nixio.py b/neo/io/nixio.py index f1be5ce31..a72a99625 100644 --- a/neo/io/nixio.py +++ b/neo/io/nixio.py @@ -1142,7 +1142,7 @@ def _nix_attr_to_neo(nix_obj): neo_attrs["nix_name"] = nix_obj.name neo_attrs["description"] = stringify(nix_obj.definition) if nix_obj.metadata: - for prop in nix_obj.metadata.props: + for prop in nix_obj.metadata.inherited_properties(): values = list(v.value for v in prop.values) if prop.unit: units = prop.unit From 9b00815e7034096b6eb43fe962baa87ae11bcb2e Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Wed, 18 Jul 2018 20:39:50 +0200 Subject: [PATCH 02/41] [nixio] Remove deprecated file backend argument --- neo/io/nixio.py | 2 +- neo/test/iotest/test_nixio.py | 19 ++++++------------- 2 files changed, 7 insertions(+), 14 deletions(-) diff --git a/neo/io/nixio.py b/neo/io/nixio.py index a72a99625..8c55a5b2e 100644 --- a/neo/io/nixio.py +++ b/neo/io/nixio.py @@ -124,7 +124,7 @@ def __init__(self, filename, mode="rw"): raise ValueError("Invalid mode specified '{}'. " "Valid modes: 'ro' (ReadOnly)', 'rw' (ReadWrite)," " 'ow' (Overwrite).".format(mode)) - self.nix_file = nix.File.open(self.filename, filemode, backend="h5py") + self.nix_file = nix.File.open(self.filename, filemode) if self.nix_file.mode == nix.FileMode.ReadOnly: self._file_version = '0.5.2' diff --git a/neo/test/iotest/test_nixio.py b/neo/test/iotest/test_nixio.py index c1d1a33bc..428149525 100644 --- a/neo/test/iotest/test_nixio.py +++ b/neo/test/iotest/test_nixio.py @@ -315,8 +315,7 @@ def compare_attr(self, neoobj, nixobj): @classmethod def create_full_nix_file(cls, filename): - nixfile = nix.File.open(filename, nix.FileMode.Overwrite, - backend="h5py") + nixfile = nix.File.open(filename, nix.FileMode.Overwrite) nix_block_a = nixfile.create_block(cls.rword(10), "neo.block") nix_block_a.definition = cls.rsentence(5, 10) @@ -643,9 +642,7 @@ def setUp(self): self.filename = os.path.join(self.tempdir, "testnixio.nix") self.writer = NixIO(self.filename, "ow") self.io = self.writer - self.reader = nix.File.open(self.filename, - nix.FileMode.ReadOnly, - backend="h5py") + self.reader = nix.File.open(self.filename, nix.FileMode.ReadOnly) def tearDown(self): self.writer.close() @@ -1209,8 +1206,7 @@ def checksignalcounts(fname): self.compare_blocks([blk], self.reader.blocks) # Read back and check counts - scndreader = nix.File.open(secondwrite, mode=nix.FileMode.ReadOnly, - backend="h5py") + scndreader = nix.File.open(secondwrite, mode=nix.FileMode.ReadOnly) self.compare_blocks([blk], scndreader.blocks) checksignalcounts(secondwrite) @@ -1369,22 +1365,19 @@ def test_context_write(self): with NixIO(self.filename, "ow") as iofile: iofile.write_block(neoblock) - nixfile = nix.File.open(self.filename, nix.FileMode.ReadOnly, - backend="h5py") + nixfile = nix.File.open(self.filename, nix.FileMode.ReadOnly) self.compare_blocks([neoblock], nixfile.blocks) nixfile.close() neoblock.annotate(**self.rdict(5)) with NixIO(self.filename, "rw") as iofile: iofile.write_block(neoblock) - nixfile = nix.File.open(self.filename, nix.FileMode.ReadOnly, - backend="h5py") + nixfile = nix.File.open(self.filename, nix.FileMode.ReadOnly) self.compare_blocks([neoblock], nixfile.blocks) nixfile.close() def test_context_read(self): 
- nixfile = nix.File.open(self.filename, nix.FileMode.Overwrite, - backend="h5py") + nixfile = nix.File.open(self.filename, nix.FileMode.Overwrite) name_one = self.rword() name_two = self.rword() nixfile.create_block(name_one, "neo.block") From 7032d68bbc22d173f542afcb4840f04c8a85cee7 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Wed, 18 Jul 2018 20:40:36 +0200 Subject: [PATCH 03/41] [nixio] nix.Value removal: New metadata compatibility NIX no longer uses Value class for managing the value of metadata properties. Values are added and retrieved directly. Empty property values are now supported, but require a type to be specified. The IO uses string as a default. NIX returns collections of property values as tuples. The IO returns lists explicitly for consistency with older behaviour. --- neo/io/nixio.py | 56 +++++++++++++++++++---------------- neo/test/iotest/test_nixio.py | 14 ++++----- 2 files changed, 37 insertions(+), 33 deletions(-) diff --git a/neo/io/nixio.py b/neo/io/nixio.py index 8c55a5b2e..6de830f04 100644 --- a/neo/io/nixio.py +++ b/neo/io/nixio.py @@ -597,18 +597,17 @@ def _write_channelindex(self, chx, nixblock): ) nixchan.definition = nixsource.definition chanmd = nixchan.metadata - chanmd["index"] = nix.Value(int(channel)) + chanmd["index"] = int(channel) if len(chx.channel_names): neochanname = stringify(chx.channel_names[idx]) - chanmd["neo_name"] = nix.Value(neochanname) + chanmd["neo_name"] = neochanname if len(chx.channel_ids): chanid = chx.channel_ids[idx] - chanmd["channel_id"] = nix.Value(chanid) + chanmd["channel_id"] = chanid if chx.coordinates is not None: coords = chx.coordinates[idx] coordunits = stringify(coords[0].dimensionality) - nixcoords = tuple(nix.Value(c.magnitude.item()) - for c in coords) + nixcoords = tuple(c.magnitude.item() for c in coords) chanprop = chanmd.create_property("coordinates", nixcoords) chanprop.unit = coordunits @@ -1076,26 +1075,28 @@ def _write_property(self, section, name, v): if isinstance(v, pq.Quantity): if len(v.shape): - section[name] = list(nix.Value(vv) for vv in v.magnitude) + section.create_property(name, tuple(v.magnitude)) else: - section[name] = nix.Value(v.magnitude.item()) + section.create_property(name, v.magnitude.item()) section.props[name].unit = str(v.dimensionality) elif isinstance(v, datetime): - section[name] = nix.Value(calculate_timestamp(v)) + section.create_property(name, calculate_timestamp(v)) elif isinstance(v, string_types): - section[name] = nix.Value(v) + if len(v): + section.create_property(name, v) + else: + section.create_property(name, nix.DataType.String) elif isinstance(v, bytes): - section[name] = nix.Value(v.decode()) + section.create_property(name, v.decode()) elif isinstance(v, Iterable): values = [] unit = None definition = None if len(v) == 0: - # empty list can't be saved in NIX property - # but we can store an empty string and use the - # definition to signify that it should be restored - # as an iterable (list) - values = "" + # NIX supports empty properties but dtype must be specified + # Defaulting to String and using definition to signify empty + # iterable as opposed to empty string + values = nix.DataType.String definition = EMPTYANNOTATION elif hasattr(v, "ndim") and v.ndim == 0: values = v.item() @@ -1104,26 +1105,26 @@ def _write_property(self, section, name, v): else: for item in v: if isinstance(item, string_types): - item = nix.Value(item) + item = item elif isinstance(item, pq.Quantity): unit = str(item.dimensionality) - item = nix.Value(item.magnitude.item()) + 
item = item.magnitude.item() elif isinstance(item, Iterable): self.logger.warn("Multidimensional arrays and nested " "containers are not currently " "supported when writing to NIX.") return None else: - item = nix.Value(item) + item = item values.append(item) - section[name] = values + section.create_property(name, values) section.props[name].unit = unit if definition: section.props[name].definition = definition elif type(v).__module__ == "numpy": - section[name] = nix.Value(v.item()) + section.create_property(name, v.item()) else: - section[name] = nix.Value(v) + section.create_property(name, v) return section.props[name] @staticmethod @@ -1147,12 +1148,15 @@ def _nix_attr_to_neo(nix_obj): if prop.unit: units = prop.unit values = create_quantity(values, units) - if len(values) == 1: + if not len(values): + if prop.definition == EMPTYANNOTATION: + values = list() + elif prop.data_type == nix.DataType.String: + values = "" + elif len(values) == 1: values = values[0] - if (not isinstance(values, pq.Quantity) and - values == "" and - prop.definition == EMPTYANNOTATION): - values = list() + else: + values = list(values) neo_attrs[prop.name] = values neo_attrs["name"] = stringify(neo_attrs.get("neo_name")) diff --git a/neo/test/iotest/test_nixio.py b/neo/test/iotest/test_nixio.py index 428149525..c7ec1a5cd 100644 --- a/neo/test/iotest/test_nixio.py +++ b/neo/test/iotest/test_nixio.py @@ -421,7 +421,7 @@ def create_full_nix_file(cls, filename): mtag_st.name, mtag_st.name + ".metadata" ) mtag_st.metadata = mtag_st_md - mtag_st_md.create_property("t_stop", nix.Value(times[-1] + 1.0)) + mtag_st_md.create_property("t_stop", times[-1] + 1.0) waveforms = cls.rquant((10, 8, 5), 1) wfname = "{}.waveforms".format(mtag_st.name) @@ -438,7 +438,7 @@ def create_full_nix_file(cls, filename): wfname, "neo.waveforms.metadata" ) wfda.metadata.create_property("left_sweep", - [nix.Value(20)] * 5) + [20] * 5) allspiketrains.append(mtag_st) # Epochs @@ -515,9 +515,9 @@ def create_full_nix_file(cls, filename): nixrc.metadata = nixchx.metadata.create_section( nixrc.name, "neo.channelindex.metadata" ) - nixrc.metadata.create_property("index", nix.Value(chan)) - nixrc.metadata.create_property("channel_id", nix.Value(chan + 1)) - dims = tuple(map(nix.Value, cls.rquant(3, 3))) + nixrc.metadata.create_property("index", chan) + nixrc.metadata.create_property("channel_id", chan + 1) + dims = cls.rquant(3, 1) coordprop = nixrc.metadata.create_property("coordinates", dims) coordprop.unit = "pm" @@ -1255,9 +1255,9 @@ def test_to_value(self): writeprop(section, "val", val) self.assertEqual(val, section["val"]) - # empty string + # empty string (gets stored as empty list) writeprop(section, "emptystring", "") - self.assertEqual("", section["emptystring"]) + self.assertEqual(list(), section["emptystring"]) def test_annotations_special_cases(self): # Special cases for annotations: empty list, list of strings, From ce09164dbfbf4cdd286930fde6f59d38069ede23 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Sun, 23 Sep 2018 11:22:01 +0200 Subject: [PATCH 04/41] [nixio] Inherit properties from linked sections When reading the metadata from NIX objects, add the inherited properties to the object as well, allowing users to link the primary metadata of the object to extended metadata properties that are added later. 
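
As a rough sketch of the metadata round-trip that this and the previous patch rely on: properties are now created with `create_property` and plain values instead of `nix.Value`, and reading goes through `inherited_properties()` so that properties of a linked section are picked up as well. The snippet below is illustrative only — the file name, section names and property names are invented, and the section `link` attribute is assumed to behave as described in the NIX documentation.

    import nixio as nix

    nf = nix.File.open("example.nix", nix.FileMode.Overwrite)
    sec = nf.create_section("neo_block.metadata", "neo.block.metadata")

    # nix.Value is gone: pass plain values, set the unit on the property
    prop = sec.create_property("temperature", 36.5)
    prop.unit = "degC"

    # extended metadata lives in a separate section linked to the first one
    extra = nf.create_section("extended.metadata", "neo.metadata")
    extra.create_property("experimenter", "A. Researcher")
    sec.link = extra

    # reading: own and linked properties come back together, which is what
    # _nix_attr_to_neo now iterates over when building annotations
    for p in sec.inherited_properties():
        print(p.name, list(p.values), p.unit)

    nf.close()
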
--- neo/io/nixio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neo/io/nixio.py b/neo/io/nixio.py index 6de830f04..e1e347b0d 100644 --- a/neo/io/nixio.py +++ b/neo/io/nixio.py @@ -1144,7 +1144,7 @@ def _nix_attr_to_neo(nix_obj): neo_attrs["description"] = stringify(nix_obj.definition) if nix_obj.metadata: for prop in nix_obj.metadata.inherited_properties(): - values = list(v.value for v in prop.values) + values = prop.values if prop.unit: units = prop.unit values = create_quantity(values, units) From 92e7164cf152700c4ae60013bfbb9536e425a64c Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Sun, 23 Sep 2018 13:51:12 +0200 Subject: [PATCH 05/41] [nixio] Change filename for NIXRawIO tests --- neo/rawio/tests/test_nixrawio.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/neo/rawio/tests/test_nixrawio.py b/neo/rawio/tests/test_nixrawio.py index 3f2669cbe..7c47f6217 100644 --- a/neo/rawio/tests/test_nixrawio.py +++ b/neo/rawio/tests/test_nixrawio.py @@ -3,9 +3,10 @@ from neo.rawio.tests.common_rawio_test import BaseTestRawIO -testfname = "neoraw.nix" +testfname = "nixrawio-1.5.nix" -class TestNixRawIO(BaseTestRawIO, unittest.TestCase, ): + +class TestNixRawIO(BaseTestRawIO, unittest.TestCase): rawioclass = NIXRawIO entities_to_test = [testfname] files_to_download = [testfname] From 1087151dee96d4b5dce8ca65ebcd1ea4abd8206c Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Sun, 23 Sep 2018 13:54:57 +0200 Subject: [PATCH 06/41] [setup] Update nixio minimum version --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index e634c2ed8..1624d9a4a 100755 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ 'igorproio': ['igor'], 'kwikio': ['scipy', 'klusta'], 'neomatlabio': ['scipy>=0.12.0'], - 'nixio': ['nixio>=1.4.3'], + 'nixio': ['nixio>=1.5.0b2'], 'stimfitio': ['stfio'], 'axographio': ['axographio'] } From 9d8906aa2dc990715382b191610786b2ab137d93 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Sun, 23 Sep 2018 13:57:07 +0200 Subject: [PATCH 07/41] [doc] Update min version for nixio in install docs --- doc/source/install.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/install.rst b/doc/source/install.rst index 804711d82..da461aa5b 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -28,7 +28,7 @@ Neo will still install but the IO module that uses them will fail on loading: * h5py >= 2.5 for Hdf5IO, KwikIO * klusta for KwikIO * igor >= 0.2 for IgorIO - * nixio >= 1.2 for NixIO + * nixio >= 1.5 for NixIO * stfio for StimfitIO From 4f39a6439c471f2e038b42e9a23e6f35554dd99e Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Sun, 23 Sep 2018 16:09:20 +0200 Subject: [PATCH 08/41] [testing] Update circleci config for newer nixio version --- .circleci/requirements_testing.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/requirements_testing.txt b/.circleci/requirements_testing.txt index 7ccb1e849..f859f6a6c 100644 --- a/.circleci/requirements_testing.txt +++ b/.circleci/requirements_testing.txt @@ -3,7 +3,7 @@ h5py igor klusta tqdm -nixio>=1.4.3 +nixio>=1.5.0b2 axographio>=0.3.1 matplotlib ipython From 6aeba1ba7f85fae07c68608ae82a74b78c855b07 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 27 Sep 2018 16:56:12 +0200 Subject: [PATCH 09/41] Implementation of the "raw" Multi Channel System (MCS) IO at rawio level. 
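
A minimal usage sketch for the reader added below, assuming the generic neo.rawio entry points (`parse_header`, `get_analogsignal_chunk`, `rescale_signal_raw_to_float`) and a placeholder file name; it shows how the low-level RawMCSRawIO and the thin RawMCSIO wrapper are meant to be used, not a finalised API.

    from neo.rawio import RawMCSRawIO
    from neo.io import RawMCSIO

    # low level: raw uint16 samples plus the gain/offset/units parsed
    # from the ASCII header that MC_DataTool writes before the data
    reader = RawMCSRawIO(filename="my_recording.raw")
    reader.parse_header()
    raw = reader.get_analogsignal_chunk(block_index=0, seg_index=0,
                                        i_start=0, i_stop=1024,
                                        channel_indexes=None)
    volts = reader.rescale_signal_raw_to_float(raw, dtype='float64')

    # high level: the wrapper assembles regular neo objects
    block = RawMCSIO(filename="my_recording.raw").read_block()
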
--- neo/io/__init__.py | 4 + neo/io/rawmcsio.py | 12 +++ neo/rawio/__init__.py | 2 + neo/rawio/rawmcsrawio.py | 158 ++++++++++++++++++++++++++++ neo/rawio/tests/test_rawmcsrawio.py | 19 ++++ neo/test/iotest/test_rawmcsio.py | 25 +++++ 6 files changed, 220 insertions(+) create mode 100644 neo/io/rawmcsio.py create mode 100644 neo/rawio/rawmcsrawio.py create mode 100644 neo/rawio/tests/test_rawmcsrawio.py create mode 100644 neo/test/iotest/test_rawmcsio.py diff --git a/neo/io/__init__.py b/neo/io/__init__.py index e53e71a0f..abc5de863 100644 --- a/neo/io/__init__.py +++ b/neo/io/__init__.py @@ -73,6 +73,8 @@ .. autoclass:: neo.io.RawBinarySignalIO +.. autoclass:: neo.io.RawMCSIO + .. autoclass:: neo.io.StimfitIO .. autoclass:: neo.io.TdtIO @@ -133,6 +135,7 @@ from neo.io.pickleio import PickleIO from neo.io.plexonio import PlexonIO from neo.io.rawbinarysignalio import RawBinarySignalIO +from neo.io.rawmcsio import RawMCSIO from neo.io.spike2io import Spike2IO from neo.io.stimfitio import StimfitIO from neo.io.tdtio import TdtIO @@ -170,6 +173,7 @@ PickleIO, PlexonIO, RawBinarySignalIO, + RawMCSIO, Spike2IO, StimfitIO, TdtIO, diff --git a/neo/io/rawmcsio.py b/neo/io/rawmcsio.py new file mode 100644 index 000000000..48a207afb --- /dev/null +++ b/neo/io/rawmcsio.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- + +from neo.io.basefromrawio import BaseFromRaw +from neo.rawio.rawmcsrawio import RawMCSRawIO + + +class RawMCSIO(RawMCSRawIO, BaseFromRaw): + _prefered_signal_group_mode = 'group-by-same-units' + + def __init__(self, filename): + RawMCSRawIO.__init__(self, filename=filename) + BaseFromRaw.__init__(self, filename) diff --git a/neo/rawio/__init__.py b/neo/rawio/__init__.py index 061055e4e..23ea0a510 100644 --- a/neo/rawio/__init__.py +++ b/neo/rawio/__init__.py @@ -22,6 +22,7 @@ from neo.rawio.nixrawio import NIXRawIO from neo.rawio.plexonrawio import PlexonRawIO from neo.rawio.rawbinarysignalrawio import RawBinarySignalRawIO +from neo.rawio.rawmcsrawio import RawMCSRawIO from neo.rawio.spike2rawio import Spike2RawIO from neo.rawio.tdtrawio import TdtRawIO from neo.rawio.winedrrawio import WinEdrRawIO @@ -39,6 +40,7 @@ NIXRawIO, PlexonRawIO, RawBinarySignalRawIO, + RawMCSRawIO, Spike2RawIO, TdtRawIO, WinEdrRawIO, diff --git a/neo/rawio/rawmcsrawio.py b/neo/rawio/rawmcsrawio.py new file mode 100644 index 000000000..a761bacf5 --- /dev/null +++ b/neo/rawio/rawmcsrawio.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- +""" +Class for reading data from "Raw" Multi Channel System (MCS) format. +This format is NOT the native MCS format (*.mcd). +This format is a raw format with an internal binary header exported by the +"MC_DataTool binary conversion". + +The internal header contain sampling rate, channel names, gain and units. +Not so bad : everything that neo need, so this IO is without parameters. + +If some MCS custumers read this you should lobby to get the real specification +of the real MCS format (.mcd) and so the MCSRawIO could be done instead of this +ersatz. 
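
For orientation, the parser below depends on only a handful of ASCII header
fields. An exported file starts with lines of roughly this shape (the values
here are invented for illustration; real ones come from MC_DataTool):

    Sample rate = 25000
    ADC zero = 32768
    El = 0.1042µV/AD
    Streams = El_01;El_02;El_03
    ...
    EOH

The binary uint16 samples, interleaved across channels, follow right after
the EOH marker.
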
+ + +Author: Samuel Garcia +""" +from __future__ import unicode_literals, print_function, division, absolute_import + +from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype, + _event_channel_dtype) + +import numpy as np + +import os +import sys + + +class RawMCSRawIO(BaseRawIO): + extensions = ['raw',] + rawmode = 'one-file' + + def __init__(self, filename=''): + BaseRawIO.__init__(self) + self.filename = filename + + def _source_name(self): + return self.filename + + def _parse_header(self): + + + self._info = info = parse_mcs_raw_header(self.filename) + + + self.dtype = 'uint16' + self.sampling_rate = info['sampling_rate'] + self.nb_channel = len(info['channel_names']) + + self._raw_signals = np.memmap(self.filename, dtype=self.dtype, mode='r', + offset=info['header_size']).reshape(-1, self.nb_channel) + + sig_channels = [] + for c in range(self.nb_channel): + chan_id = c + group_id = 0 + sig_channels.append((info['channel_names'][c], chan_id, self.sampling_rate, self.dtype, + info['signal_units'], info['signal_gain'], info['signal_offset'], group_id)) + sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype) + + # No events + event_channels = [] + event_channels = np.array(event_channels, dtype=_event_channel_dtype) + + # No spikes + unit_channels = [] + unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype) + + # fille into header dict + self.header = {} + self.header['nb_block'] = 1 + self.header['nb_segment'] = [1] + self.header['signal_channels'] = sig_channels + self.header['unit_channels'] = unit_channels + self.header['event_channels'] = event_channels + + # insert some annotation at some place + self._generate_minimal_annotations() + + def _segment_t_start(self, block_index, seg_index): + return 0. + + def _segment_t_stop(self, block_index, seg_index): + t_stop = self._raw_signals.shape[0] / self.sampling_rate + return t_stop + + def _get_signal_size(self, block_index, seg_index, channel_indexes): + return self._raw_signals.shape[0] + + def _get_signal_t_start(self, block_index, seg_index, channel_indexes): + return 0. + + def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes): + if channel_indexes is None: + channel_indexes = slice(None) + raw_signals = self._raw_signals[slice(i_start, i_stop), channel_indexes] + + return raw_signals + + +def parse_mcs_raw_header(filename): + """ + This is a mix with stuff on github. 
+ https://github.com/spyking-circus/spyking-circus/blob/master/circus/files/mcs_raw_binary.py + https://github.com/jeffalstott/Multi-Channel-Systems-Import/blob/master/MCS.py + + """ + MAX_HEADER_SIZE = 5000 + + with open(filename, mode='rb') as f: + raw_header = f.read(MAX_HEADER_SIZE) + header_size = raw_header.find(b'EOH') + assert header_size !=-1, 'Error in reading raw mcs header' + header_size = header_size + 5 + #~ print(header_size) + raw_header = raw_header[:header_size] + raw_header = raw_header.replace(b'\r',b'') + + info = {} + info['header_size'] = header_size + + def parse_line(line, key): + if key + b' = ' in line: + v = line.replace(key, b'').replace(b' ', b'').replace(b'=', b'') + return v + + keys = (b'Sample rate', b'ADC zero', b'ADC zero',b'El', b'Streams') + + for line in raw_header.split(b'\n'): + for key in keys: + v = parse_line(line, key) + if v is None: + continue + + if key == b'Sample rate': + info['sampling_rate'] = float(v) + + elif key == b'ADC zero': + info['adc_zero'] = int(v) + + elif key == b'El': + v = v.decode('Windows-1252') + v = v.replace('/AD', '') + split_pos = 0 + while v[split_pos] in '1234567890.': + split_pos += 1 + if split_pos == len(v): + split_pos = None + break + assert split_pos is not None, 'Impossible to find units and scaling' + info['signal_gain'] = float(v[:split_pos]) + info['signal_units'] = v[split_pos:] + info['signal_offset'] = -info['signal_gain'] * info['adc_zero'] + + elif key == b'Streams': + info['channel_names'] = v.decode('Windows-1252').split(';') + + return info diff --git a/neo/rawio/tests/test_rawmcsrawio.py b/neo/rawio/tests/test_rawmcsrawio.py new file mode 100644 index 000000000..442480709 --- /dev/null +++ b/neo/rawio/tests/test_rawmcsrawio.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- + +# needed for python 3 compatibility +from __future__ import unicode_literals, print_function, division, absolute_import + +import unittest + +from neo.rawio.rawmcsrawio import RawMCSRawIO +from neo.rawio.tests.common_rawio_test import BaseTestRawIO + + +class TestRawMCSRawIO(BaseTestRawIO, unittest.TestCase, ): + rawioclass = RawMCSRawIO + entities_to_test = [] #TODO + files_to_download = entities_to_test + + +if __name__ == "__main__": + unittest.main() diff --git a/neo/test/iotest/test_rawmcsio.py b/neo/test/iotest/test_rawmcsio.py new file mode 100644 index 000000000..0a1b245af --- /dev/null +++ b/neo/test/iotest/test_rawmcsio.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +""" +Tests of neo.io.plexonio +""" + +# needed for python 3 compatibility +from __future__ import absolute_import, division + +import sys + +import unittest + +from neo.io import RawMCSIO +from neo.test.iotest.common_io_test import BaseTestIO + + +class TestRawMcsIO(BaseTestIO, unittest.TestCase, ): + ioclass = RawMCSIO + files_to_test = [ #TODO + ] + files_to_download = files_to_test + + +if __name__ == "__main__": + unittest.main() From 0ee0e5e371656a41ae50f4e576bccf028e18b6a6 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 2 Oct 2018 12:04:35 +0200 Subject: [PATCH 10/41] pep8 + file for testing. --- neo/rawio/rawmcsrawio.py | 41 +++++++++++++---------------- neo/rawio/tests/test_rawmcsrawio.py | 2 +- neo/test/iotest/test_rawmcsio.py | 6 +---- 3 files changed, 21 insertions(+), 28 deletions(-) diff --git a/neo/rawio/rawmcsrawio.py b/neo/rawio/rawmcsrawio.py index a761bacf5..6bd5ba758 100644 --- a/neo/rawio/rawmcsrawio.py +++ b/neo/rawio/rawmcsrawio.py @@ -2,14 +2,14 @@ """ Class for reading data from "Raw" Multi Channel System (MCS) format. 
This format is NOT the native MCS format (*.mcd). -This format is a raw format with an internal binary header exported by the -"MC_DataTool binary conversion". +This format is a raw format with an internal binary header exported by the +"MC_DataTool binary conversion" with the option header slected. The internal header contain sampling rate, channel names, gain and units. Not so bad : everything that neo need, so this IO is without parameters. If some MCS custumers read this you should lobby to get the real specification -of the real MCS format (.mcd) and so the MCSRawIO could be done instead of this +of the real MCS format (.mcd) and so the MCSRawIO could be done instead of this ersatz. @@ -27,7 +27,7 @@ class RawMCSRawIO(BaseRawIO): - extensions = ['raw',] + extensions = ['raw'] rawmode = 'one-file' def __init__(self, filename=''): @@ -38,11 +38,8 @@ def _source_name(self): return self.filename def _parse_header(self): - - self._info = info = parse_mcs_raw_header(self.filename) - - + self.dtype = 'uint16' self.sampling_rate = info['sampling_rate'] self.nb_channel = len(info['channel_names']) @@ -54,8 +51,9 @@ def _parse_header(self): for c in range(self.nb_channel): chan_id = c group_id = 0 - sig_channels.append((info['channel_names'][c], chan_id, self.sampling_rate, self.dtype, - info['signal_units'], info['signal_gain'], info['signal_offset'], group_id)) + sig_channels.append((info['channel_names'][c], chan_id, self.sampling_rate, + self.dtype, info['signal_units'], info['signal_gain'], + info['signal_offset'], group_id)) sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype) # No events @@ -103,41 +101,40 @@ def parse_mcs_raw_header(filename): This is a mix with stuff on github. https://github.com/spyking-circus/spyking-circus/blob/master/circus/files/mcs_raw_binary.py https://github.com/jeffalstott/Multi-Channel-Systems-Import/blob/master/MCS.py - """ MAX_HEADER_SIZE = 5000 with open(filename, mode='rb') as f: raw_header = f.read(MAX_HEADER_SIZE) + header_size = raw_header.find(b'EOH') - assert header_size !=-1, 'Error in reading raw mcs header' + assert header_size != -1, 'Error in reading raw mcs header' header_size = header_size + 5 - #~ print(header_size) raw_header = raw_header[:header_size] - raw_header = raw_header.replace(b'\r',b'') - + raw_header = raw_header.replace(b'\r', b'') + info = {} info['header_size'] = header_size - + def parse_line(line, key): if key + b' = ' in line: v = line.replace(key, b'').replace(b' ', b'').replace(b'=', b'') return v - keys = (b'Sample rate', b'ADC zero', b'ADC zero',b'El', b'Streams') + keys = (b'Sample rate', b'ADC zero', b'ADC zero', b'El', b'Streams') for line in raw_header.split(b'\n'): for key in keys: v = parse_line(line, key) if v is None: continue - + if key == b'Sample rate': info['sampling_rate'] = float(v) - + elif key == b'ADC zero': info['adc_zero'] = int(v) - + elif key == b'El': v = v.decode('Windows-1252') v = v.replace('/AD', '') @@ -149,9 +146,9 @@ def parse_line(line, key): break assert split_pos is not None, 'Impossible to find units and scaling' info['signal_gain'] = float(v[:split_pos]) - info['signal_units'] = v[split_pos:] + info['signal_units'] = v[split_pos:].replace(u'µ', u'u') info['signal_offset'] = -info['signal_gain'] * info['adc_zero'] - + elif key == b'Streams': info['channel_names'] = v.decode('Windows-1252').split(';') diff --git a/neo/rawio/tests/test_rawmcsrawio.py b/neo/rawio/tests/test_rawmcsrawio.py index 442480709..f3689e7ef 100644 --- a/neo/rawio/tests/test_rawmcsrawio.py +++ 
b/neo/rawio/tests/test_rawmcsrawio.py @@ -11,7 +11,7 @@ class TestRawMCSRawIO(BaseTestRawIO, unittest.TestCase, ): rawioclass = RawMCSRawIO - entities_to_test = [] #TODO + entities_to_test = ['raw_mcs_with_header_1.raw'] files_to_download = entities_to_test diff --git a/neo/test/iotest/test_rawmcsio.py b/neo/test/iotest/test_rawmcsio.py index 0a1b245af..b4513c0e3 100644 --- a/neo/test/iotest/test_rawmcsio.py +++ b/neo/test/iotest/test_rawmcsio.py @@ -1,7 +1,4 @@ # -*- coding: utf-8 -*- -""" -Tests of neo.io.plexonio -""" # needed for python 3 compatibility from __future__ import absolute_import, division @@ -16,8 +13,7 @@ class TestRawMcsIO(BaseTestIO, unittest.TestCase, ): ioclass = RawMCSIO - files_to_test = [ #TODO - ] + files_to_test = ['raw_mcs_with_header_1.raw'] files_to_download = files_to_test From c9ef6e5470a194d617f354f462958d36edc2a5b2 Mon Sep 17 00:00:00 2001 From: Andrew Davison Date: Mon, 22 Oct 2018 17:52:49 +0200 Subject: [PATCH 11/41] add `Event.to_epoch` method Replaces #404 --- neo/core/event.py | 47 ++++++++++++++++++++++++++++ neo/test/coretest/test_event.py | 54 +++++++++++++++++++++++++++++++++ 2 files changed, 101 insertions(+) diff --git a/neo/core/event.py b/neo/core/event.py index b996375cb..684394516 100644 --- a/neo/core/event.py +++ b/neo/core/event.py @@ -16,6 +16,8 @@ import quantities as pq from neo.core.baseneo import BaseNeo, merge_annotations +from neo.core.epoch import Epoch + PY_VER = sys.version_info[0] @@ -256,3 +258,48 @@ def as_quantity(self): Return the event times as a quantities array. """ return self.view(pq.Quantity) + + def to_epoch(self, pairwise=False, durations=None): + """ + Transform Event to Epoch. + + This method has three modes of action. + + 1. By default, an array of `n` event times will be transformed into + an array of `n-1` epochs, where the end of one epoch is the + beginning of the next. + 2. If `pairwise` is True, then the event times will be taken as pairs + representing the start and end time of an epoch. The number of + events must be even, otherwise a ValueError is raised. + 3. If `durations` is given, it should be a scalar Quantity or a + Quantity array of the same size as the Event. + Each event time is then taken as the start of an epoch of duration + given by `durations`. + + `pairwise=True` and `durations` are mutually exclusive. A ValueError + will be raised if both are given. + """ + + if pairwise: + # Mode 2 + if durations is not None: + raise ValueError("Inconsistent arguments. 
" + "Cannot give both `pairwise` and `durations`") + if self.size % 2 != 0: + raise ValueError("Pairwise conversion of events to epochs" + " requires an even number of events") + times = self.times[::2] + durations = self.times[1::2] - times + labels = np.array(["{}-{}".format(a, b) + for a, b in zip(self.labels[::2], self.labels[1::2])]) + elif durations is None: + # Mode 1 + times = self.times[:-1] + durations = np.diff(self.times) + labels = np.array(["{}-{}".format(a, b) + for a, b in zip(self.labels[:-1], self.labels[1:])]) + else: + # Mode 3 + times = self.times + labels = self.labels + return Epoch(times=times, durations=durations, labels=labels) diff --git a/neo/test/coretest/test_event.py b/neo/test/coretest/test_event.py index 2e5985132..142e450ab 100644 --- a/neo/test/coretest/test_event.py +++ b/neo/test/coretest/test_event.py @@ -19,6 +19,7 @@ HAVE_IPYTHON = True from neo.core.event import Event +from neo.core.epoch import Epoch from neo.core import Segment from neo.test.tools import (assert_neo_object_is_compliant, assert_arrays_equal, @@ -399,6 +400,59 @@ def test_as_quantity(self): self.assertIsInstance(evt_as_q, pq.Quantity) assert_array_equal(data * pq.ms, evt_as_q) + def test_to_epoch(self): + seg = Segment(name="test") + event = Event(times=np.array([5.0, 12.0, 23.0, 45.0]), units="ms", + labels=np.array(["A", "B", "C", "D"])) + event.segment = seg + + # Mode 1 + epoch = event.to_epoch() + self.assertIsInstance(epoch, Epoch) + assert_array_equal(epoch.times.magnitude, + np.array([5.0, 12.0, 23.0])) + assert_array_equal(epoch.durations.magnitude, + np.array([7.0, 11.0, 22.0])) + assert_array_equal(epoch.labels, + np.array(['A-B', 'B-C', 'C-D'])) + + # Mode 2 + epoch = event.to_epoch(pairwise=True) + assert_array_equal(epoch.times.magnitude, + np.array([5.0, 23.0])) + assert_array_equal(epoch.durations.magnitude, + np.array([7.0, 22.0])) + assert_array_equal(epoch.labels, + np.array(['A-B', 'C-D'])) + + # Mode 3 (scalar) + epoch = event.to_epoch(durations=2.0*pq.ms) + assert_array_equal(epoch.times.magnitude, + np.array([5.0, 12.0, 23.0, 45.0])) + assert_array_equal(epoch.durations.magnitude, + np.array([2.0, 2.0, 2.0, 2.0])) + assert_array_equal(epoch.labels, + np.array(['A', 'B', 'C', 'D'])) + + # Mode 3 (array) + epoch = event.to_epoch(durations=np.array([2.0, 3.0, 4.0, 5.0])*pq.ms) + assert_array_equal(epoch.times.magnitude, + np.array([5.0, 12.0, 23.0, 45.0])) + assert_array_equal(epoch.durations.magnitude, + np.array([2.0, 3.0, 4.0, 5.0])) + assert_array_equal(epoch.labels, + np.array(['A', 'B', 'C', 'D'])) + + # Error conditions + self.assertRaises(ValueError, event.to_epoch, pairwise=True, durations=2.0*pq.ms) + + odd_event = Event(times=np.array([5.0, 12.0, 23.0]), units="ms", + labels=np.array(["A", "B", "C"])) + self.assertRaises(ValueError, odd_event.to_epoch, pairwise=True) + + # todo: fix Epoch, as the following does not raise a ValueError + # self.assertRaises(ValueError, event.to_epoch, durations=2.0) # missing units + class TestDuplicateWithNewData(unittest.TestCase): def setUp(self): From 80a5d644a4268c04ae3a69188910bff106a24c97 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 31 Oct 2018 16:08:33 +0100 Subject: [PATCH 12/41] Init intanrawio --- neo/rawio/__init__.py | 2 ++ neo/rawio/intanrawio.py | 27 +++++++++++++++++++++++++++ neo/rawio/tests/test_intanrawio.py | 21 +++++++++++++++++++++ 3 files changed, 50 insertions(+) create mode 100644 neo/rawio/intanrawio.py create mode 100644 neo/rawio/tests/test_intanrawio.py diff --git 
a/neo/rawio/__init__.py b/neo/rawio/__init__.py index 061055e4e..95c991be4 100644 --- a/neo/rawio/__init__.py +++ b/neo/rawio/__init__.py @@ -15,6 +15,7 @@ from neo.rawio.blackrockrawio import BlackrockRawIO from neo.rawio.brainvisionrawio import BrainVisionRawIO from neo.rawio.elanrawio import ElanRawIO +from neo.rawio.intanrawio import IntanRawIO from neo.rawio.micromedrawio import MicromedRawIO from neo.rawio.neuralynxrawio import NeuralynxRawIO from neo.rawio.neuroexplorerrawio import NeuroExplorerRawIO @@ -32,6 +33,7 @@ BlackrockRawIO, BrainVisionRawIO, ElanRawIO, + IntanRawIO, MicromedRawIO, NeuralynxRawIO, NeuroExplorerRawIO, diff --git a/neo/rawio/intanrawio.py b/neo/rawio/intanrawio.py new file mode 100644 index 000000000..b3d86303a --- /dev/null +++ b/neo/rawio/intanrawio.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +""" + +Support for intan tech rhd file. + +See http://intantech.com/files/Intan_RHD2000_data_file_formats.pdf + +Author: Samuel Garcia + +""" +from __future__ import print_function, division, absolute_import +# from __future__ import unicode_literals is not compatible with numpy.dtype both py2 py3 + +from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype, + _event_channel_dtype) + +import numpy as np +from collections import OrderedDict + + +class IntanRawIO(BaseRawIO): + """ + + """ + extensions = ['rdh'] + rawmode = 'one-dir' + \ No newline at end of file diff --git a/neo/rawio/tests/test_intanrawio.py b/neo/rawio/tests/test_intanrawio.py new file mode 100644 index 000000000..ce99a7b73 --- /dev/null +++ b/neo/rawio/tests/test_intanrawio.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- + +# needed for python 3 compatibility +from __future__ import unicode_literals, print_function, division, absolute_import + +import unittest + +from neo.rawio.intanrawio import IntanRawIO + +from neo.rawio.tests.common_rawio_test import BaseTestRawIO + + +class TestIntanRawIO(BaseTestRawIO, unittest.TestCase, ): + rawioclass = IntanRawIO + files_to_download = [ + ] + entities_to_test = files_to_download + + +if __name__ == "__main__": + unittest.main() From 57816f9a11d2ab797a635f1dc6b7c37f8d1918a8 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Mon, 5 Nov 2018 16:18:11 +0100 Subject: [PATCH 13/41] WIP intan --- neo/rawio/intanrawio.py | 249 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 245 insertions(+), 4 deletions(-) diff --git a/neo/rawio/intanrawio.py b/neo/rawio/intanrawio.py index b3d86303a..9138a39cd 100644 --- a/neo/rawio/intanrawio.py +++ b/neo/rawio/intanrawio.py @@ -1,9 +1,13 @@ # -*- coding: utf-8 -*- """ -Support for intan tech rhd file. +Support for intan tech rhd and rhs files. -See http://intantech.com/files/Intan_RHD2000_data_file_formats.pdf +This 2 formats are more or less the same with some variance in headers. 
+ +See: + * http://intantech.com/files/Intan_RHD2000_data_file_formats.pdf + * http://intantech.com/files/Intan_RHS2000_data_file_formats.pdf Author: Samuel Garcia @@ -22,6 +26,243 @@ class IntanRawIO(BaseRawIO): """ """ - extensions = ['rdh'] - rawmode = 'one-dir' + extensions = ['rhd', 'rhs'] + rawmode = 'one-file' + + def __init__(self, filename=''): + BaseRawIO.__init__(self) + self.filename = filename + + def _source_name(self): + return self.filename + + def _parse_header(self): + + if self.filename.endswith('.rhs'): + info = read_rhs(self.filename) + elif self.filename.endswith('.rhd'): + info = read_rh(self.filename) + + exit() + + + + # signals + sig_channels = [] + for c in range(nb_channel): + name = 'ch{}grp{}'.format(c, channel_group[c]) + chan_id = c + units = 'mV' + offset = 0. + group_id = 0 + sig_channels.append((name, chan_id, self._sampling_rate, + sig_dtype, units, gain, offset, group_id)) + sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype) + + # No events + event_channels = [] + event_channels = np.array(event_channels, dtype=_event_channel_dtype) + + # No spikes + unit_channels = [] + unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype) + + # fille into header dict + self.header = {} + self.header['nb_block'] = 1 + self.header['nb_segment'] = [1] + self.header['signal_channels'] = sig_channels + self.header['unit_channels'] = unit_channels + self.header['event_channels'] = event_channels + + self._generate_minimal_annotations() + + def _segment_t_start(self, block_index, seg_index): + return 0. + + def _segment_t_stop(self, block_index, seg_index): + t_stop = self._raw_signals.shape[0] / self._sampling_rate + return t_stop + + def _get_signal_size(self, block_index, seg_index, channel_indexes): + return self._raw_signals.shape[0] + + def _get_signal_t_start(self, block_index, seg_index, channel_indexes): + return 0. + + def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes): + if channel_indexes is None: + channel_indexes = slice(None) + raw_signals = self._raw_signals[slice(i_start, i_stop), channel_indexes] + return raw_signals + + + + +def read_qstring(f): + a = '' + length = np.fromfile(f, dtype='uint32', count=1)[0] + + print('length', length, hex(length)) + #~ exit() + + if length == 0xFFFFFFFF or length == '': + return '' + + txt ='' + txt = f.read(length // 2) #.decode('utf-16') + #~ print(txt) + #~ txt = txt.decode() + + #~ for ii in range(length): + #~ print('ii', ii) + #~ newchar = np.fromfile(f, 'u2', 1)[0] + #~ print(newchar) + #~ a += newchar.tostring().decode('utf-16') + return txt + + +import struct +import os +import sys +def read_qstring(fid): + """Read Qt style QString. + + The first 32-bit unsigned number indicates the length of the string (in bytes). + If this number equals 0xFFFFFFFF, the string is null. + + Strings are stored as unicode. 
+ """ + + length, = struct.unpack(' (os.fstat(fid.fileno()).st_size - fid.tell() + 1) : + print(length) + raise Exception('Length too long.') + + # convert length from bytes to 16-bit Unicode words + length = int(length / 2) + + data = [] + for i in range(0, length): + c, = struct.unpack('= (3,0): + a = ''.join([chr(c) for c in data]) + else: + a = ''.join([unichr(c) for c in data]) + + return a + + + + +rhs_global_header =[ + ('magic_number', 'uint32'), # 0xD69127AC for rhs 0xC6912702 for rdh + + ('major_version', 'int16'), + ('minor_version', 'int16'), + + ('sampling_rate', 'float32'), + + ('dsp_enabled', 'int16'), + + ('actual_dsp_cutoff_frequency', 'float32'), + ('actual_lower_bandwidth', 'float32'), + ('actual_lower_settle_bandwidth', 'float32'), ####### + ('actual_upper_bandwidth', 'float32'), + ('desired_dsp_cutoff_frequency', 'float32'), + ('desired_lower_bandwidth', 'float32'), + ('desired_lower_settle_bandwidth', 'float32'), ##### + ('desired_upper_bandwidth', 'float32'), + + ('notch_filter_mode', 'int16'), # 0 :no filter 1: 50Hz 2 : 60Hz + + ('desired_impedance_test_frequency', 'float32'), + ('actual_impedance_test_frequency', 'float32'), + + ('amp_settle_mode', 'int16'), #### + ('charge_recovery_mode', 'int16'), #### + + ('stim_step_size', 'float32'), #### + ('recovery_current_limit', 'float32'), #### + ('recovery_target_voltage', 'float32'), #### + + ('note1', 'QString'), + ('note2', 'QString'), + ('note3', 'QString'), + + ('dc_amplifier_data_saved', 'int16'), ###### nb_temp_sensor + + + + ('board_mode', 'int16'), + + ('ref_channel_name', 'QString'), + + ('nb_signal_group', 'int16'), + +] + +signal_group_header = [ + ('signal_group_name', 'QString'), + ('signal_group_prefix', 'QString'), + ('signal_group_enabled', 'int16'), + ('channel_num', 'int16'), + ('amplified_channel_num', 'int16'), +] + +signal_channel_header = [ + ('native_channel_name', 'QString'), + ('custom_channel_name', 'QString'), + ('native_order', 'int16'), + ('custom_order', 'int16'), + ('signal_type', 'int16'), + ('channel_enabled', 'int16'), + ('chip_channel_num', 'int16'), + ('board_stream_num', 'int16'), + ('spike_scope_trigger_mode', 'int16'), + ('spike_scope_voltage_thresh', 'int16'), + ('spike_scope_digital_trigger_channel', 'int16'), + ('spike_scope_digital_edge_polarity', 'int16'), + ('electrode_impedance_magnitude', 'float32'), + ('electrode_impedance_phase', 'float32'), + +] + + +def read_variable_header(f, header): + info = {} + for field_name, field_type in header: + + if field_type == 'QString': + field_value = read_qstring(f) + print(field_name, field_type, len(field_value), field_value) + else: + field_value = np.fromfile(f, dtype=field_type, count=1)[0] + print(field_name, field_type, field_value) + + info[field_name] = field_value + + return info + + + +def read_rhd(filename): + return + + +def read_rhs(filename): + with open(filename, mode='rb') as f: + info = read_variable_header(f, rhs_global_header) + + + + + + + \ No newline at end of file From 139930b0aab6e8289b02d93f2d24a31f83c4e039 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Mon, 5 Nov 2018 22:45:35 +0100 Subject: [PATCH 14/41] intan RHS WIP --- neo/rawio/intanrawio.py | 183 ++++++++++++++++++++++------------------ 1 file changed, 101 insertions(+), 82 deletions(-) diff --git a/neo/rawio/intanrawio.py b/neo/rawio/intanrawio.py index 9138a39cd..84695e10a 100644 --- a/neo/rawio/intanrawio.py +++ b/neo/rawio/intanrawio.py @@ -3,7 +3,9 @@ Support for intan tech rhd and rhs files. 
-This 2 formats are more or less the same with some variance in headers. +This 2 formats are more or less the same but: + * some variance in headers. + * rhs amplifier is more complexe because the optional DC channel See: * http://intantech.com/files/Intan_RHD2000_data_file_formats.pdf @@ -22,6 +24,9 @@ from collections import OrderedDict +BLOCK_SIZE = 128 # sample per block + + class IntanRawIO(BaseRawIO): """ @@ -39,24 +44,34 @@ def _source_name(self): def _parse_header(self): if self.filename.endswith('.rhs'): - info = read_rhs(self.filename) + self._global_info, self._channels_info, data_dtype, header_size = read_rhs(self.filename) + # self._dc_amplifier_data_saved = bool(self._global_info['dc_amplifier_data_saved']) elif self.filename.endswith('.rhd'): - info = read_rh(self.filename) + self._global_info, self._channels_info, data_dtype, header_size = read_rhd(self.filename) + #  self._dc_amplifier_data_saved = False + + self._sampling_rate = self._global_info['sampling_rate'] - exit() - + print(len(data_dtype)) + self._raw_data = np.memmap(self.filename, dtype=data_dtype, mode='r', offset=header_size) + self._sigs_length = self._raw_data.size * BLOCK_SIZE + # TODO check timestamp continuity + #~ timestamp = self._raw_data['timestamp'].flatten() + #~ assert np.all(np.diff(timestamp)==1) # signals sig_channels = [] - for c in range(nb_channel): - name = 'ch{}grp{}'.format(c, channel_group[c]) + for c, chan_info in enumerate(self._channels_info): + name = chan_info['native_channel_name'] chan_id = c - units = 'mV' - offset = 0. + units = 'uV' + offset = 0. # TODO + gain = 1. # TODO + sig_dtype = 'uint16' group_id = 0 sig_channels.append((name, chan_id, self._sampling_rate, - sig_dtype, units, gain, offset, group_id)) + sig_dtype, units, gain, offset, group_id)) sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype) # No events @@ -81,84 +96,48 @@ def _segment_t_start(self, block_index, seg_index): return 0. def _segment_t_stop(self, block_index, seg_index): - t_stop = self._raw_signals.shape[0] / self._sampling_rate + t_stop = self._sigs_length / self._sampling_rate return t_stop def _get_signal_size(self, block_index, seg_index, channel_indexes): - return self._raw_signals.shape[0] + return self._sigs_length def _get_signal_t_start(self, block_index, seg_index, channel_indexes): return 0. def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes): - if channel_indexes is None: - channel_indexes = slice(None) - raw_signals = self._raw_signals[slice(i_start, i_stop), channel_indexes] - return raw_signals - + if i_start is None: + i_start = 0 + if i_stop is None: + i_stop = self._sigs_length + block_start = i_start // BLOCK_SIZE + block_stop = i_stop // BLOCK_SIZE + 1 + sl0 = i_start % BLOCK_SIZE + sl1 = sl0 + (i_stop - i_start) -def read_qstring(f): - a = '' - length = np.fromfile(f, dtype='uint32', count=1)[0] - - print('length', length, hex(length)) - #~ exit() - - if length == 0xFFFFFFFF or length == '': - return '' - - txt ='' - txt = f.read(length // 2) #.decode('utf-16') - #~ print(txt) - #~ txt = txt.decode() - - #~ for ii in range(length): - #~ print('ii', ii) - #~ newchar = np.fromfile(f, 'u2', 1)[0] - #~ print(newchar) - #~ a += newchar.tostring().decode('utf-16') - return txt - - -import struct -import os -import sys -def read_qstring(fid): - """Read Qt style QString. - - The first 32-bit unsigned number indicates the length of the string (in bytes). - If this number equals 0xFFFFFFFF, the string is null. 
- - Strings are stored as unicode. - """ - - length, = struct.unpack(' (os.fstat(fid.fileno()).st_size - fid.tell() + 1) : - print(length) - raise Exception('Length too long.') + if channel_indexes is None: + channel_indexes = slice(None) + channel_names = self.header['signal_channels'][channel_indexes]['name'] - # convert length from bytes to 16-bit Unicode words - length = int(length / 2) + sigs_chunk = np.zeros((i_stop - i_start, len(channel_names)), dtype='uint16') + for i, chan_name in enumerate(channel_names): + data = self._raw_data[chan_name] + sigs_chunk[:, i] = data[block_start:block_stop].flatten()[sl0:sl1] - data = [] - for i in range(0, length): - c, = struct.unpack('= (3,0): - a = ''.join([chr(c) for c in data]) - else: - a = ''.join([unichr(c) for c in data]) - return a +def read_qstring(f): + length = np.fromfile(f, dtype='uint32', count=1)[0] + if length == 0xFFFFFFFF or length == 0: + return '' + txt = f.read(length).decode('utf-16') + return txt rhs_global_header =[ ('magic_number', 'uint32'), # 0xD69127AC for rhs 0xC6912702 for rdh @@ -215,7 +194,7 @@ def read_qstring(fid): ('amplified_channel_num', 'int16'), ] -signal_channel_header = [ +rhs_signal_channel_header = [ ('native_channel_name', 'QString'), ('custom_channel_name', 'QString'), ('native_order', 'int16'), @@ -223,6 +202,7 @@ def read_qstring(fid): ('signal_type', 'int16'), ('channel_enabled', 'int16'), ('chip_channel_num', 'int16'), + ('command_stream', 'int16'), ####### ('board_stream_num', 'int16'), ('spike_scope_trigger_mode', 'int16'), ('spike_scope_voltage_thresh', 'int16'), @@ -240,11 +220,12 @@ def read_variable_header(f, header): if field_type == 'QString': field_value = read_qstring(f) - print(field_name, field_type, len(field_value), field_value) + #~ print(field_name, field_type, len(field_value), field_value) else: field_value = np.fromfile(f, dtype=field_type, count=1)[0] - print(field_name, field_type, field_value) - + #~ print(field_name, field_type, field_value) + + #~ print(field_name, ':', field_value) info[field_name] = field_value return info @@ -255,14 +236,52 @@ def read_rhd(filename): return + +# signal_type +# 0: RHS2000 amplifier channel. +# 3: Analog input channel. +# 4: Analog output channel. +# 5: Digital input channel. +# 6: Digital output channel. 
+ + + def read_rhs(filename): with open(filename, mode='rb') as f: - info = read_variable_header(f, rhs_global_header) - - - - - - + global_info = read_variable_header(f, rhs_global_header) + + print(global_info['dc_amplifier_data_saved'], bool(global_info['dc_amplifier_data_saved'])) + channels_info = [] + data_dtype = [('timestamp', 'int32', BLOCK_SIZE)] + for g in range(global_info['nb_signal_group']): + #~ print('goup', g) + group_info = read_variable_header(f, signal_group_header) + print(group_info) + if bool(group_info['signal_group_enabled']): + for c in range(group_info['channel_num']): + #~ print(' c', c) + chan_info = read_variable_header(f, rhs_signal_channel_header) + + if bool(chan_info['channel_enabled']): + channels_info.append(chan_info) + print('goup', g, 'channel', c, chan_info['native_channel_name']) + name = chan_info['native_channel_name'] + data_dtype +=[(name, 'int32', BLOCK_SIZE)] + + if chan_info['signal_type'] == 0: + if bool(global_info['dc_amplifier_data_saved']): + chan_info_dc = dict(chan_info) + chan_info_dc['native_channel_name'] = name+'_DC' + channels_info.append(chan_info_dc) + data_dtype +=[(name+'_DC', 'int32', BLOCK_SIZE)] + + chan_info_stim = dict(chan_info) + chan_info_stim['native_channel_name'] = name+'_STIM' + channels_info.append(chan_info_stim) + data_dtype +=[(name+'_STIM', 'int32', BLOCK_SIZE)] + + header_size = f.tell() - \ No newline at end of file + return global_info, channels_info, data_dtype, header_size + + From 98abd5dd52078255e94fb76a2d06777b03f3d628 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 6 Nov 2018 00:00:10 +0100 Subject: [PATCH 15/41] WIP RHD files --- neo/rawio/intanrawio.py | 246 +++++++++++++++++++++++++++++++++------- 1 file changed, 206 insertions(+), 40 deletions(-) diff --git a/neo/rawio/intanrawio.py b/neo/rawio/intanrawio.py index 84695e10a..c8de1d2fd 100644 --- a/neo/rawio/intanrawio.py +++ b/neo/rawio/intanrawio.py @@ -24,7 +24,6 @@ from collections import OrderedDict -BLOCK_SIZE = 128 # sample per block class IntanRawIO(BaseRawIO): @@ -44,17 +43,17 @@ def _source_name(self): def _parse_header(self): if self.filename.endswith('.rhs'): - self._global_info, self._channels_info, data_dtype, header_size = read_rhs(self.filename) + self._global_info, self._channels_info, data_dtype, header_size, self.block_size = read_rhs(self.filename) # self._dc_amplifier_data_saved = bool(self._global_info['dc_amplifier_data_saved']) elif self.filename.endswith('.rhd'): - self._global_info, self._channels_info, data_dtype, header_size = read_rhd(self.filename) + self._global_info, self._channels_info, data_dtype, header_size, self.block_size = read_rhd(self.filename) #  self._dc_amplifier_data_saved = False self._sampling_rate = self._global_info['sampling_rate'] print(len(data_dtype)) self._raw_data = np.memmap(self.filename, dtype=data_dtype, mode='r', offset=header_size) - self._sigs_length = self._raw_data.size * BLOCK_SIZE + self._sigs_length = self._raw_data.size * self.block_size # TODO check timestamp continuity #~ timestamp = self._raw_data['timestamp'].flatten() @@ -112,9 +111,9 @@ def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, chann if i_stop is None: i_stop = self._sigs_length - block_start = i_start // BLOCK_SIZE - block_stop = i_stop // BLOCK_SIZE + 1 - sl0 = i_start % BLOCK_SIZE + block_start = i_start // self.block_size + block_stop = i_stop // self.block_size + 1 + sl0 = i_start % self.block_size sl1 = sl0 + (i_stop - i_start) if channel_indexes is None: @@ -138,9 +137,196 @@ 
def read_qstring(f): return '' txt = f.read(length).decode('utf-16') return txt + + + +def read_variable_header(f, header): + info = {} + for field_name, field_type in header: + + if field_type == 'QString': + field_value = read_qstring(f) + #~ print(field_name, field_type, len(field_value), field_value) + else: + field_value = np.fromfile(f, dtype=field_type, count=1)[0] + #~ print(field_name, field_type, field_value) + + #~ print(field_name, ':', field_value) + info[field_name] = field_value + + return info + + + + + +rhd_global_header =[ + ('magic_number', 'uint32'), # 0xC6912702 + + ('major_version', 'int16'), + ('minor_version', 'int16'), + + ('sampling_rate', 'float32'), + + ('dsp_enabled', 'int16'), + + ('actual_dsp_cutoff_frequency', 'float32'), + ('actual_lower_bandwidth', 'float32'), + ('actual_upper_bandwidth', 'float32'), + ('desired_dsp_cutoff_frequency', 'float32'), + ('desired_lower_bandwidth', 'float32'), + ('desired_upper_bandwidth', 'float32'), + + ('notch_filter_mode', 'int16'), + + ('desired_impedance_test_frequency', 'float32'), + ('actual_impedance_test_frequency', 'float32'), + + ('note1', 'QString'), + ('note2', 'QString'), + ('note3', 'QString'), + + ('nb_temp_sensor', 'int16'), + + + + #~ ('board_mode', 'int16'), + + #~ ('ref_channel_name', 'QString'), + + #~ ('nb_signal_group', 'int16'), + +] + +rhd_global_header_v11 = [ + ('num_temp_sensor_channels', 'int16'), +] + +rhd_global_header_v13 = [ + ('eval_board_mode', 'int16'), +] + +rhd_global_header_v20 = [ + ('reference_channel', 'QString'), +] + +rhd_signal_group_header = [ + ('signal_group_name', 'QString'), + ('signal_group_prefix', 'QString'), + ('signal_group_enabled', 'int16'), + ('channel_num', 'int16'), + ('amplified_channel_num', 'int16'), +] + +rhd_signal_channel_header = [ + ('native_channel_name', 'QString'), + ('custom_channel_name', 'QString'), + ('native_order', 'int16'), + ('custom_order', 'int16'), + ('signal_type', 'int16'), + ('channel_enabled', 'int16'), + ('chip_channel_num', 'int16'), + ('board_stream_num', 'int16'), + ('spike_scope_trigger_mode', 'int16'), + ('spike_scope_voltage_thresh', 'int16'), + ('spike_scope_digital_trigger_channel', 'int16'), + ('spike_scope_digital_edge_polarity', 'int16'), + ('electrode_impedance_magnitude', 'float32'), + ('electrode_impedance_phase', 'float32'), + +] + + +# signal type +# 0: RHD2000 amplifier channel +# 1: RHD2000 auxiliary input channel +# 2: RHD2000 supply voltage channel +# 3: USB board ADC input channel +# 4: USB board digital input channel +# 5: USB board digital output channel + + + +def read_rhd(filename): + # TODO FIXME : error in dtype order + + with open(filename, mode='rb') as f: + global_info = read_variable_header(f, rhd_global_header) + + if ['major_version']>=2: + BLOCK_SIZE = 128 + else: + BLOCK_SIZE = 60 # 256 channels + + global_info['num_temp_sensor_channels'] = 0 + global_info['eval_board_mode'] = 0 + global_info['eval_board_mode'] = 0 + + if (['major_version'] == 1 and ['minor_version'] >= 1) or (['major_version'] >= 2): + global_info.update(read_variable_header(f, rhd_global_header_v11)) + + if (['major_version'] == 1 and ['minor_version'] >= 3) or (['major_version'] >= 2): + global_info.update(read_variable_header(f, rhd_global_header_v13)) + + if ['major_version'] >= 2: + global_info.update(read_variable_header(f, rhd_global_header_v20)) + + global_info.update(read_variable_header(f, [('nb_signal_group', 'int16'),])) + + channels_info = [] + if (['major_version'] == 1 and ['minor_version'] >= 2) or (['major_version'] >= 2): 
+ data_dtype = [('timestamp', 'int32', BLOCK_SIZE)] + else: + data_dtype = [('timestamp', 'uint32', BLOCK_SIZE)] + + for g in range(global_info['nb_signal_group']): + group_info = read_variable_header(f, rhd_signal_group_header) + + if bool(group_info['signal_group_enabled']): + for c in range(group_info['channel_num']): + chan_info = read_variable_header(f, rhd_signal_channel_header) + + if bool(chan_info['channel_enabled']): + channels_info.append(chan_info) + name = chan_info['native_channel_name'] + + if chan_info['signal_type'] in (0, 3, 4, 5): + data_dtype +=[(name, 'uint16', BLOCK_SIZE)] + + if chan_info['signal_type'] in (1, ): + data_dtype +=[(name, 'uint16', BLOCK_SIZE//4)] + + # TODO temperature + if chan_info['signal_type'] in (2, ): + data_dtype +=[(name, 'uint16')] + + #~ global_info['num_temp_sensor_channels'] + + + + + #~ if chan_info['signal_type'] == 0: + #~ if bool(global_info['dc_amplifier_data_saved']): + #~ chan_info_dc = dict(chan_info) + #~ chan_info_dc['native_channel_name'] = name+'_DC' + #~ channels_info.append(chan_info_dc) + #~ data_dtype +=[(name+'_DC', 'int32', BLOCK_SIZE)] + + #~ chan_info_stim = dict(chan_info) + #~ chan_info_stim['native_channel_name'] = name+'_STIM' + #~ channels_info.append(chan_info_stim) + #~ data_dtype +=[(name+'_STIM', 'int32', BLOCK_SIZE)] + + header_size = f.tell() + # TODO sampling_rate for auxilary channel + + return global_info, channels_info, data_dtype, header_size, BLOCK_SIZE + + + rhs_global_header =[ - ('magic_number', 'uint32'), # 0xD69127AC for rhs 0xC6912702 for rdh + ('magic_number', 'uint32'), # 0xD69127AC ('major_version', 'int16'), ('minor_version', 'int16'), @@ -158,7 +344,7 @@ def read_qstring(f): ('desired_lower_settle_bandwidth', 'float32'), ##### ('desired_upper_bandwidth', 'float32'), - ('notch_filter_mode', 'int16'), # 0 :no filter 1: 50Hz 2 : 60Hz + ('notch_filter_mode', 'int16'), ('desired_impedance_test_frequency', 'float32'), ('actual_impedance_test_frequency', 'float32'), @@ -186,7 +372,7 @@ def read_qstring(f): ] -signal_group_header = [ +rhs_signal_group_header = [ ('signal_group_name', 'QString'), ('signal_group_prefix', 'QString'), ('signal_group_enabled', 'int16'), @@ -214,27 +400,7 @@ def read_qstring(f): ] -def read_variable_header(f, header): - info = {} - for field_name, field_type in header: - - if field_type == 'QString': - field_value = read_qstring(f) - #~ print(field_name, field_type, len(field_value), field_value) - else: - field_value = np.fromfile(f, dtype=field_type, count=1)[0] - #~ print(field_name, field_type, field_value) - - #~ print(field_name, ':', field_value) - info[field_name] = field_value - - return info - - -def read_rhd(filename): - return - # signal_type @@ -247,41 +413,41 @@ def read_rhd(filename): def read_rhs(filename): + # TODO FIXME : error in dtype order + + BLOCK_SIZE = 128 # sample per block + with open(filename, mode='rb') as f: global_info = read_variable_header(f, rhs_global_header) - print(global_info['dc_amplifier_data_saved'], bool(global_info['dc_amplifier_data_saved'])) channels_info = [] data_dtype = [('timestamp', 'int32', BLOCK_SIZE)] for g in range(global_info['nb_signal_group']): - #~ print('goup', g) - group_info = read_variable_header(f, signal_group_header) - print(group_info) + group_info = read_variable_header(f, rhs_signal_group_header) + if bool(group_info['signal_group_enabled']): for c in range(group_info['channel_num']): - #~ print(' c', c) chan_info = read_variable_header(f, rhs_signal_channel_header) if bool(chan_info['channel_enabled']): 
channels_info.append(chan_info) - print('goup', g, 'channel', c, chan_info['native_channel_name']) name = chan_info['native_channel_name'] - data_dtype +=[(name, 'int32', BLOCK_SIZE)] + data_dtype +=[(name, 'uint16', BLOCK_SIZE)] if chan_info['signal_type'] == 0: if bool(global_info['dc_amplifier_data_saved']): chan_info_dc = dict(chan_info) chan_info_dc['native_channel_name'] = name+'_DC' channels_info.append(chan_info_dc) - data_dtype +=[(name+'_DC', 'int32', BLOCK_SIZE)] + data_dtype +=[(name+'_DC', 'uint16', BLOCK_SIZE)] chan_info_stim = dict(chan_info) chan_info_stim['native_channel_name'] = name+'_STIM' channels_info.append(chan_info_stim) - data_dtype +=[(name+'_STIM', 'int32', BLOCK_SIZE)] + data_dtype +=[(name+'_STIM', 'uint16', BLOCK_SIZE)] header_size = f.tell() - return global_info, channels_info, data_dtype, header_size + return global_info, channels_info, data_dtype, header_size, BLOCK_SIZE From c21bc62862aab7173c615a5da6cfe32c3cde75b4 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 6 Nov 2018 23:38:11 +0100 Subject: [PATCH 16/41] WIP RHD and RHS --- neo/rawio/intanrawio.py | 377 +++++++++++++++++++++++----------------- 1 file changed, 219 insertions(+), 158 deletions(-) diff --git a/neo/rawio/intanrawio.py b/neo/rawio/intanrawio.py index c8de1d2fd..152d96604 100644 --- a/neo/rawio/intanrawio.py +++ b/neo/rawio/intanrawio.py @@ -7,6 +7,9 @@ * some variance in headers. * rhs amplifier is more complexe because the optional DC channel +RHS supported version 1.0 +RHD supported version 1.0 1.1 1.2 1.3 2.0 + See: * http://intantech.com/files/Intan_RHD2000_data_file_formats.pdf * http://intantech.com/files/Intan_RHS2000_data_file_formats.pdf @@ -22,6 +25,7 @@ import numpy as np from collections import OrderedDict +from distutils.version import LooseVersion as V @@ -43,25 +47,25 @@ def _source_name(self): def _parse_header(self): if self.filename.endswith('.rhs'): - self._global_info, self._channels_info, data_dtype, header_size, self.block_size = read_rhs(self.filename) - # self._dc_amplifier_data_saved = bool(self._global_info['dc_amplifier_data_saved']) + self._global_info, self._ordered_channels, data_dtype, header_size, self.block_size = read_rhs(self.filename) elif self.filename.endswith('.rhd'): - self._global_info, self._channels_info, data_dtype, header_size, self.block_size = read_rhd(self.filename) - #  self._dc_amplifier_data_saved = False + self._global_info, self._ordered_channels, data_dtype, header_size, self.block_size = read_rhd(self.filename) + # TODO this depend on channel self._sampling_rate = self._global_info['sampling_rate'] - print(len(data_dtype)) self._raw_data = np.memmap(self.filename, dtype=data_dtype, mode='r', offset=header_size) + + # TODO this depend on channel self._sigs_length = self._raw_data.size * self.block_size # TODO check timestamp continuity - #~ timestamp = self._raw_data['timestamp'].flatten() - #~ assert np.all(np.diff(timestamp)==1) + timestamp = self._raw_data['timestamp'].flatten() + assert np.all(np.diff(timestamp)==1) # signals sig_channels = [] - for c, chan_info in enumerate(self._channels_info): + for c, chan_info in enumerate(self._ordered_channels): name = chan_info['native_channel_name'] chan_id = c units = 'uV' @@ -72,6 +76,7 @@ def _parse_header(self): sig_channels.append((name, chan_id, self._sampling_rate, sig_dtype, units, gain, offset, group_id)) sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype) + #~ print(sig_channels) # No events event_channels = [] @@ -122,8 +127,8 @@ def 
_get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, chann sigs_chunk = np.zeros((i_stop - i_start, len(channel_names)), dtype='uint16') for i, chan_name in enumerate(channel_names): - data = self._raw_data[chan_name] - sigs_chunk[:, i] = data[block_start:block_stop].flatten()[sl0:sl1] + data_chan = self._raw_data[chan_name] + sigs_chunk[:, i] = data_chan[block_start:block_stop].flatten()[sl0:sl1] return sigs_chunk @@ -159,9 +164,13 @@ def read_variable_header(f, header): +############### +# RHS ZONE -rhd_global_header =[ - ('magic_number', 'uint32'), # 0xC6912702 + + +rhs_global_header =[ + ('magic_number', 'uint32'), # 0xD69127AC ('major_version', 'int16'), ('minor_version', 'int16'), @@ -172,9 +181,11 @@ def read_variable_header(f, header): ('actual_dsp_cutoff_frequency', 'float32'), ('actual_lower_bandwidth', 'float32'), + ('actual_lower_settle_bandwidth', 'float32'), ('actual_upper_bandwidth', 'float32'), ('desired_dsp_cutoff_frequency', 'float32'), ('desired_lower_bandwidth', 'float32'), + ('desired_lower_settle_bandwidth', 'float32'), ('desired_upper_bandwidth', 'float32'), ('notch_filter_mode', 'int16'), @@ -182,35 +193,30 @@ def read_variable_header(f, header): ('desired_impedance_test_frequency', 'float32'), ('actual_impedance_test_frequency', 'float32'), + ('amp_settle_mode', 'int16'), + ('charge_recovery_mode', 'int16'), + + ('stim_step_size', 'float32'), + ('recovery_current_limit', 'float32'), + ('recovery_target_voltage', 'float32'), + ('note1', 'QString'), ('note2', 'QString'), ('note3', 'QString'), - ('nb_temp_sensor', 'int16'), + ('dc_amplifier_data_saved', 'int16'), - #~ ('board_mode', 'int16'), + ('board_mode', 'int16'), - #~ ('ref_channel_name', 'QString'), + ('ref_channel_name', 'QString'), - #~ ('nb_signal_group', 'int16'), + ('nb_signal_group', 'int16'), ] -rhd_global_header_v11 = [ - ('num_temp_sensor_channels', 'int16'), -] - -rhd_global_header_v13 = [ - ('eval_board_mode', 'int16'), -] - -rhd_global_header_v20 = [ - ('reference_channel', 'QString'), -] - -rhd_signal_group_header = [ +rhs_signal_group_header = [ ('signal_group_name', 'QString'), ('signal_group_prefix', 'QString'), ('signal_group_enabled', 'int16'), @@ -218,7 +224,7 @@ def read_variable_header(f, header): ('amplified_channel_num', 'int16'), ] -rhd_signal_channel_header = [ +rhs_signal_channel_header = [ ('native_channel_name', 'QString'), ('custom_channel_name', 'QString'), ('native_order', 'int16'), @@ -226,6 +232,7 @@ def read_variable_header(f, header): ('signal_type', 'int16'), ('channel_enabled', 'int16'), ('chip_channel_num', 'int16'), + ('command_stream', 'int16'), ####### ('board_stream_num', 'int16'), ('spike_scope_trigger_mode', 'int16'), ('spike_scope_voltage_thresh', 'int16'), @@ -237,99 +244,98 @@ def read_variable_header(f, header): ] -# signal type -# 0: RHD2000 amplifier channel -# 1: RHD2000 auxiliary input channel -# 2: RHD2000 supply voltage channel -# 3: USB board ADC input channel -# 4: USB board digital input channel -# 5: USB board digital output channel -def read_rhd(filename): - # TODO FIXME : error in dtype order - - with open(filename, mode='rb') as f: - global_info = read_variable_header(f, rhd_global_header) - - if ['major_version']>=2: - BLOCK_SIZE = 128 - else: - BLOCK_SIZE = 60 # 256 channels - - global_info['num_temp_sensor_channels'] = 0 - global_info['eval_board_mode'] = 0 - global_info['eval_board_mode'] = 0 - - if (['major_version'] == 1 and ['minor_version'] >= 1) or (['major_version'] >= 2): - global_info.update(read_variable_header(f, 
rhd_global_header_v11)) +# signal_type +# 0: RHS2000 amplifier channel. +# 3: Analog input channel. +# 4: Analog output channel. +# 5: Digital input channel. +# 6: Digital output channel. - if (['major_version'] == 1 and ['minor_version'] >= 3) or (['major_version'] >= 2): - global_info.update(read_variable_header(f, rhd_global_header_v13)) - - if ['major_version'] >= 2: - global_info.update(read_variable_header(f, rhd_global_header_v20)) - - global_info.update(read_variable_header(f, [('nb_signal_group', 'int16'),])) - - channels_info = [] - if (['major_version'] == 1 and ['minor_version'] >= 2) or (['major_version'] >= 2): - data_dtype = [('timestamp', 'int32', BLOCK_SIZE)] - else: - data_dtype = [('timestamp', 'uint32', BLOCK_SIZE)] + + +def read_rhs(filename): + BLOCK_SIZE = 128 # sample per block + + with open(filename, mode='rb') as f: + global_info = read_variable_header(f, rhs_global_header) + channels_by_type = {k:[] for k in [0,3, 4, 5, 6]} for g in range(global_info['nb_signal_group']): - group_info = read_variable_header(f, rhd_signal_group_header) + group_info = read_variable_header(f, rhs_signal_group_header) + #~ print(group_info) if bool(group_info['signal_group_enabled']): for c in range(group_info['channel_num']): - chan_info = read_variable_header(f, rhd_signal_channel_header) - + chan_info = read_variable_header(f, rhs_signal_channel_header) + assert chan_info['signal_type'] not in (1, 2) if bool(chan_info['channel_enabled']): - channels_info.append(chan_info) - name = chan_info['native_channel_name'] - - if chan_info['signal_type'] in (0, 3, 4, 5): - data_dtype +=[(name, 'uint16', BLOCK_SIZE)] - - if chan_info['signal_type'] in (1, ): - data_dtype +=[(name, 'uint16', BLOCK_SIZE//4)] - - # TODO temperature - if chan_info['signal_type'] in (2, ): - data_dtype +=[(name, 'uint16')] - - #~ global_info['num_temp_sensor_channels'] - - - - - #~ if chan_info['signal_type'] == 0: - #~ if bool(global_info['dc_amplifier_data_saved']): - #~ chan_info_dc = dict(chan_info) - #~ chan_info_dc['native_channel_name'] = name+'_DC' - #~ channels_info.append(chan_info_dc) - #~ data_dtype +=[(name+'_DC', 'int32', BLOCK_SIZE)] - - #~ chan_info_stim = dict(chan_info) - #~ chan_info_stim['native_channel_name'] = name+'_STIM' - #~ channels_info.append(chan_info_stim) - #~ data_dtype +=[(name+'_STIM', 'int32', BLOCK_SIZE)] - + channels_by_type[chan_info['signal_type']].append(chan_info) + header_size = f.tell() - # TODO sampling_rate for auxilary channel - return global_info, channels_info, data_dtype, header_size, BLOCK_SIZE + # construct dtype by re-ordering channels by types + ordered_channels = [] + data_dtype = [('timestamp', 'int32', BLOCK_SIZE)] + for chan_info in channels_by_type[0]: + name = chan_info['native_channel_name'] + ordered_channels.append(chan_info) + data_dtype +=[(name, 'uint16', BLOCK_SIZE)] + + if bool(global_info['dc_amplifier_data_saved']): + for chan_info in channels_by_type[0]: + name = chan_info['native_channel_name'] + chan_info_dc = dict(chan_info) + chan_info_dc['native_channel_name'] = name+'_DC' + ordered_channels.append(chan_info_dc) + data_dtype +=[(name+'_DC', 'uint16', BLOCK_SIZE)] + + for chan_info in channels_by_type[0]: + name = chan_info['native_channel_name'] + chan_info_stim = dict(chan_info) + chan_info_stim['native_channel_name'] = name+'_STIM' + ordered_channels.append(chan_info_stim) + data_dtype +=[(name+'_STIM', 'uint16', BLOCK_SIZE)] + + for sig_type in [3, 4, ]: + for chan_info in channels_by_type[sig_type]: + name = 
chan_info['native_channel_name'] + ordered_channels.append(chan_info) + data_dtype +=[(name, 'uint16', BLOCK_SIZE)] + + for sig_type in [5, 6]: + if len(channels_by_type[sig_type]) > 0: + name = {5:'DIGITAL-IN', 6:'DIGITAL-OUT' }[sig_type] + data_dtype +=[(name, 'uint16', BLOCK_SIZE)] + + for e in data_dtype: + print(e) + + #~ for chan_info in ordered_channels: + #~ print(chan_info) + + #~ print(data_dtype) + #~ exit() + return global_info, ordered_channels, data_dtype, header_size, BLOCK_SIZE -rhs_global_header =[ - ('magic_number', 'uint32'), # 0xD69127AC +############### +# RHD ZONE + + +rhd_global_header_base =[ + ('magic_number', 'uint32'), # 0xC6912702 ('major_version', 'int16'), ('minor_version', 'int16'), +] + + +rhd_global_header_part1 =[ ('sampling_rate', 'float32'), @@ -337,11 +343,9 @@ def read_rhd(filename): ('actual_dsp_cutoff_frequency', 'float32'), ('actual_lower_bandwidth', 'float32'), - ('actual_lower_settle_bandwidth', 'float32'), ####### ('actual_upper_bandwidth', 'float32'), ('desired_dsp_cutoff_frequency', 'float32'), ('desired_lower_bandwidth', 'float32'), - ('desired_lower_settle_bandwidth', 'float32'), ##### ('desired_upper_bandwidth', 'float32'), ('notch_filter_mode', 'int16'), @@ -349,30 +353,31 @@ def read_rhd(filename): ('desired_impedance_test_frequency', 'float32'), ('actual_impedance_test_frequency', 'float32'), - ('amp_settle_mode', 'int16'), #### - ('charge_recovery_mode', 'int16'), #### - - ('stim_step_size', 'float32'), #### - ('recovery_current_limit', 'float32'), #### - ('recovery_target_voltage', 'float32'), #### - ('note1', 'QString'), ('note2', 'QString'), ('note3', 'QString'), - ('dc_amplifier_data_saved', 'int16'), ###### nb_temp_sensor - - - - ('board_mode', 'int16'), - - ('ref_channel_name', 'QString'), + ('nb_temp_sensor', 'int16'), +] + +rhd_global_header_v11 = [ + ('num_temp_sensor_channels', 'int16'), +] + +rhd_global_header_v13 = [ + ('eval_board_mode', 'int16'), +] + +rhd_global_header_v20 = [ + ('reference_channel', 'QString'), +] + +rhd_global_header_final = [ ('nb_signal_group', 'int16'), - ] -rhs_signal_group_header = [ +rhd_signal_group_header = [ ('signal_group_name', 'QString'), ('signal_group_prefix', 'QString'), ('signal_group_enabled', 'int16'), @@ -380,7 +385,7 @@ def read_rhd(filename): ('amplified_channel_num', 'int16'), ] -rhs_signal_channel_header = [ +rhd_signal_channel_header = [ ('native_channel_name', 'QString'), ('custom_channel_name', 'QString'), ('native_order', 'int16'), @@ -388,7 +393,6 @@ def read_rhd(filename): ('signal_type', 'int16'), ('channel_enabled', 'int16'), ('chip_channel_num', 'int16'), - ('command_stream', 'int16'), ####### ('board_stream_num', 'int16'), ('spike_scope_trigger_mode', 'int16'), ('spike_scope_voltage_thresh', 'int16'), @@ -402,52 +406,109 @@ def read_rhd(filename): - -# signal_type -# 0: RHS2000 amplifier channel. -# 3: Analog input channel. -# 4: Analog output channel. -# 5: Digital input channel. -# 6: Digital output channel. 
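# A minimal, self-contained sketch of the layout idea used by read_rhs()/read_rhd():
# the file body is a sequence of fixed-size records, so a structured dtype plus
# numpy.memmap gives lazy, per-channel access without loading the whole file. Everything
# below is illustrative only: 'demo.rhs' and the channel names 'A-000'/'A-001' are made
# up, and the 0.195 uV-per-bit scaling simply mirrors the amplifier gain/offset
# introduced later in this patch series.
import numpy as np
DEMO_BLOCK_SIZE = 128
demo_dtype = [('timestamp', 'int32', DEMO_BLOCK_SIZE),
              ('A-000', 'uint16', DEMO_BLOCK_SIZE),
              ('A-001', 'uint16', DEMO_BLOCK_SIZE)]
# raw = np.memmap('demo.rhs', dtype=demo_dtype, mode='r', offset=0)  # offset = header size
# chan = raw['A-000'].flatten()                         # contiguous samples of one channel
# chan_uv = (chan.astype('float64') - 32768) * 0.195    # unsigned 16-bit codes to microvolts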
+# signal type +# 0: RHD2000 amplifier channel +# 1: RHD2000 auxiliary input channel +# 2: RHD2000 supply voltage channel +# 3: USB board ADC input channel +# 4: USB board digital input channel +# 5: USB board digital output channel -def read_rhs(filename): +def read_rhd(filename): # TODO FIXME : error in dtype order - BLOCK_SIZE = 128 # sample per block - with open(filename, mode='rb') as f: - global_info = read_variable_header(f, rhs_global_header) - channels_info = [] - data_dtype = [('timestamp', 'int32', BLOCK_SIZE)] + global_info = read_variable_header(f, rhd_global_header_base) + + version = V('{major_version}.{minor_version}'.format(global_info)) + print(version) + + # the header size depend on the version :-( + header = list(rhd_global_header_part1) # make a copy + + if version>='1.1': + header = header + rhd_global_header_v11 + else: + global_info['num_temp_sensor_channels'] = 0 + + if version >= '1.3': + header = header + rhd_global_header_v13 + else: + global_info['eval_board_mode'] = 0 + + if version >= '2.0': + header = header + rhd_global_header_v20 + else: + global_info['reference_channel'] = '' + + header = header + rhd_global_header_final + + global_info.update(read_variable_header(f, header)) + + # read channel group and channel header + channels_by_type = {k:[] for k in [0, 1, 2, 3, 4, 5,]} for g in range(global_info['nb_signal_group']): - group_info = read_variable_header(f, rhs_signal_group_header) + group_info = read_variable_header(f, rhd_signal_group_header) if bool(group_info['signal_group_enabled']): for c in range(group_info['channel_num']): - chan_info = read_variable_header(f, rhs_signal_channel_header) - + chan_info = read_variable_header(f, rhd_signal_channel_header) if bool(chan_info['channel_enabled']): - channels_info.append(chan_info) - name = chan_info['native_channel_name'] - data_dtype +=[(name, 'uint16', BLOCK_SIZE)] - - if chan_info['signal_type'] == 0: - if bool(global_info['dc_amplifier_data_saved']): - chan_info_dc = dict(chan_info) - chan_info_dc['native_channel_name'] = name+'_DC' - channels_info.append(chan_info_dc) - data_dtype +=[(name+'_DC', 'uint16', BLOCK_SIZE)] - - chan_info_stim = dict(chan_info) - chan_info_stim['native_channel_name'] = name+'_STIM' - channels_info.append(chan_info_stim) - data_dtype +=[(name+'_STIM', 'uint16', BLOCK_SIZE)] - + channels_by_type[chan_info['signal_type']].append(chan_info) + header_size = f.tell() + + # TODO sampling_rate for auxilary channel - return global_info, channels_info, data_dtype, header_size, BLOCK_SIZE - + # construct the data block dtype + + if version>='2.0': + BLOCK_SIZE = 128 + else: + BLOCK_SIZE = 60 # 256 channels + ordered_channels = [] + + if version>='1.2': + data_dtype = [('timestamp', 'int32', BLOCK_SIZE)] + else: + data_dtype = [('timestamp', 'uint32', BLOCK_SIZE)] + + # 0: RHD2000 amplifier channel + for chan_info in channels_by_type[0]: + name = chan_info['native_channel_name'] + ordered_channels.append(chan_info) + data_dtype +=[(name, 'uint16', BLOCK_SIZE)] + + # 1: RHD2000 auxiliary input channel + for chan_info in channels_by_type[1]: + name = chan_info['native_channel_name'] + ordered_channels.append(chan_info) + data_dtype +=[(name, 'uint16', BLOCK_SIZE//4)] + + # 2: RHD2000 supply voltage channel + for chan_info in channels_by_type[2]: + name = chan_info['native_channel_name'] + ordered_channels.append(chan_info) + data_dtype +=[('supply_voltage', 'uint16')] + + for i in range(global_info['num_temp_sensor_channels']): + data_dtype +=[('temperature_{}'.format(i), 
'uint16')] + + # 3: USB board ADC input channel + for chan_info in channels_by_type[3]: + name = chan_info['native_channel_name'] + ordered_channels.append(chan_info) + data_dtype +=[(name, 'uint16', BLOCK_SIZE)] + + # 4: USB board digital input channel + # 5: USB board digital output channel + for sig_type in [4, 5]: + if len(channels_by_type[sig_type]) > 0: + name = {4:'DIGITAL-IN', 5:'DIGITAL-OUT' }[sig_type] + data_dtype +=[(name, 'uint16', BLOCK_SIZE)] + + return global_info, ordered_channels, data_dtype, header_size, BLOCK_SIZE From 1e59a9f3dfc0f979181fdc1607077d9a62b0bbe4 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 6 Nov 2018 23:43:36 +0100 Subject: [PATCH 17/41] Some clean. --- neo/rawio/intanrawio.py | 72 +++++++---------------------------------- 1 file changed, 11 insertions(+), 61 deletions(-) diff --git a/neo/rawio/intanrawio.py b/neo/rawio/intanrawio.py index 152d96604..1bd46ba9d 100644 --- a/neo/rawio/intanrawio.py +++ b/neo/rawio/intanrawio.py @@ -76,7 +76,6 @@ def _parse_header(self): sig_channels.append((name, chan_id, self._sampling_rate, sig_dtype, units, gain, offset, group_id)) sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype) - #~ print(sig_channels) # No events event_channels = [] @@ -134,31 +133,22 @@ def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, chann - - def read_qstring(f): length = np.fromfile(f, dtype='uint32', count=1)[0] if length == 0xFFFFFFFF or length == 0: return '' txt = f.read(length).decode('utf-16') return txt - - + def read_variable_header(f, header): info = {} for field_name, field_type in header: - if field_type == 'QString': field_value = read_qstring(f) - #~ print(field_name, field_type, len(field_value), field_value) else: field_value = np.fromfile(f, dtype=field_type, count=1)[0] - #~ print(field_name, field_type, field_value) - - #~ print(field_name, ':', field_value) info[field_name] = field_value - return info @@ -167,8 +157,6 @@ def read_variable_header(f, header): ############### # RHS ZONE - - rhs_global_header =[ ('magic_number', 'uint32'), # 0xD69127AC @@ -206,14 +194,11 @@ def read_variable_header(f, header): ('dc_amplifier_data_saved', 'int16'), - - ('board_mode', 'int16'), ('ref_channel_name', 'QString'), ('nb_signal_group', 'int16'), - ] rhs_signal_group_header = [ @@ -244,18 +229,6 @@ def read_variable_header(f, header): ] - - - -# signal_type -# 0: RHS2000 amplifier channel. -# 3: Analog input channel. -# 4: Analog output channel. -# 5: Digital input channel. -# 6: Digital output channel. - - - def read_rhs(filename): BLOCK_SIZE = 128 # sample per block @@ -265,7 +238,6 @@ def read_rhs(filename): channels_by_type = {k:[] for k in [0,3, 4, 5, 6]} for g in range(global_info['nb_signal_group']): group_info = read_variable_header(f, rhs_signal_group_header) - #~ print(group_info) if bool(group_info['signal_group_enabled']): for c in range(group_info['channel_num']): @@ -279,6 +251,8 @@ def read_rhs(filename): # construct dtype by re-ordering channels by types ordered_channels = [] data_dtype = [('timestamp', 'int32', BLOCK_SIZE)] + + # 0: RHS2000 amplifier channel. for chan_info in channels_by_type[0]: name = chan_info['native_channel_name'] ordered_channels.append(chan_info) @@ -298,45 +272,36 @@ def read_rhs(filename): chan_info_stim['native_channel_name'] = name+'_STIM' ordered_channels.append(chan_info_stim) data_dtype +=[(name+'_STIM', 'uint16', BLOCK_SIZE)] - + + # 3: Analog input channel. + # 4: Analog output channel. 
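        # (descriptive note: like the amplifier channels above, each enabled board analog
        # channel contributes one 'uint16' field of BLOCK_SIZE samples per record)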
for sig_type in [3, 4, ]: for chan_info in channels_by_type[sig_type]: name = chan_info['native_channel_name'] ordered_channels.append(chan_info) data_dtype +=[(name, 'uint16', BLOCK_SIZE)] - + + # 5: Digital input channel. + # 6: Digital output channel. for sig_type in [5, 6]: if len(channels_by_type[sig_type]) > 0: name = {5:'DIGITAL-IN', 6:'DIGITAL-OUT' }[sig_type] data_dtype +=[(name, 'uint16', BLOCK_SIZE)] - for e in data_dtype: - print(e) - - #~ for chan_info in ordered_channels: - #~ print(chan_info) - - #~ print(data_dtype) - #~ exit() return global_info, ordered_channels, data_dtype, header_size, BLOCK_SIZE - - ############### # RHD ZONE - rhd_global_header_base =[ ('magic_number', 'uint32'), # 0xC6912702 - ('major_version', 'int16'), ('minor_version', 'int16'), ] -rhd_global_header_part1 =[ - +rhd_global_header_part1 =[ ('sampling_rate', 'float32'), ('dsp_enabled', 'int16'), @@ -357,8 +322,7 @@ def read_rhs(filename): ('note2', 'QString'), ('note3', 'QString'), - ('nb_temp_sensor', 'int16'), - + ('nb_temp_sensor', 'int16'), ] rhd_global_header_v11 = [ @@ -404,21 +368,7 @@ def read_rhs(filename): ] - - -# signal type -# 0: RHD2000 amplifier channel -# 1: RHD2000 auxiliary input channel -# 2: RHD2000 supply voltage channel -# 3: USB board ADC input channel -# 4: USB board digital input channel -# 5: USB board digital output channel - - - def read_rhd(filename): - # TODO FIXME : error in dtype order - with open(filename, mode='rb') as f: global_info = read_variable_header(f, rhd_global_header_base) From d75aa5e94902d31be85787dfdf9c9e3f61a086f2 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 7 Nov 2018 11:10:47 +0100 Subject: [PATCH 18/41] add neo.io class for intanio --- neo/io/__init__.py | 4 ++ neo/io/intanio.py | 12 ++++ neo/rawio/intanrawio.py | 107 +++++++++++++++++++++++++------- neo/test/iotest/test_intanio.py | 25 ++++++++ 4 files changed, 126 insertions(+), 22 deletions(-) create mode 100644 neo/io/intanio.py create mode 100644 neo/test/iotest/test_intanio.py diff --git a/neo/io/__init__.py b/neo/io/__init__.py index e53e71a0f..1c50d979a 100644 --- a/neo/io/__init__.py +++ b/neo/io/__init__.py @@ -43,6 +43,8 @@ .. autoclass:: neo.io.IgorIO +.. autoclass:: neo.io.IntanIO + .. autoclass:: neo.io.KlustaKwikIO .. 
autoclass:: neo.io.KwikIO @@ -117,6 +119,7 @@ # from neo.io.elphyio import ElphyIO from neo.io.exampleio import ExampleIO from neo.io.igorproio import IgorIO +from neo.io.intanio import IntanIO from neo.io.klustakwikio import KlustaKwikIO from neo.io.kwikio import KwikIO from neo.io.micromedio import MicromedIO @@ -155,6 +158,7 @@ # ElphyIO, ExampleIO, IgorIO, + IntanIO, KlustaKwikIO, KwikIO, MicromedIO, diff --git a/neo/io/intanio.py b/neo/io/intanio.py new file mode 100644 index 000000000..f7de06f48 --- /dev/null +++ b/neo/io/intanio.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- + +from neo.io.basefromrawio import BaseFromRaw +from neo.rawio.intanrawio import IntanRawIO + + +class IntanIO(IntanRawIO, BaseFromRaw): + __doc__ = IntanRawIO.__doc__ + _prefered_signal_group_mode = 'group-by-same-units' + def __init__(self, filename): + IntanRawIO.__init__(self, filename=filename) + BaseFromRaw.__init__(self, filename) diff --git a/neo/rawio/intanrawio.py b/neo/rawio/intanrawio.py index 1bd46ba9d..4f0d73f17 100644 --- a/neo/rawio/intanrawio.py +++ b/neo/rawio/intanrawio.py @@ -50,16 +50,11 @@ def _parse_header(self): self._global_info, self._ordered_channels, data_dtype, header_size, self.block_size = read_rhs(self.filename) elif self.filename.endswith('.rhd'): self._global_info, self._ordered_channels, data_dtype, header_size, self.block_size = read_rhd(self.filename) - - # TODO this depend on channel - self._sampling_rate = self._global_info['sampling_rate'] - + + # memmap raw data with the complicated structured dtype self._raw_data = np.memmap(self.filename, dtype=data_dtype, mode='r', offset=header_size) - # TODO this depend on channel - self._sigs_length = self._raw_data.size * self.block_size - - # TODO check timestamp continuity + # check timestamp continuity timestamp = self._raw_data['timestamp'].flatten() assert np.all(np.diff(timestamp)==1) @@ -67,15 +62,20 @@ def _parse_header(self): sig_channels = [] for c, chan_info in enumerate(self._ordered_channels): name = chan_info['native_channel_name'] - chan_id = c - units = 'uV' - offset = 0. # TODO - gain = 1. # TODO - sig_dtype = 'uint16' + chan_id = c # the chan_id have no meaning in intan + if chan_info['signal_type'] == 20: + # exception for temperature + sig_dtype = 'int16' + else: + sig_dtype = 'uint16' group_id = 0 - sig_channels.append((name, chan_id, self._sampling_rate, - sig_dtype, units, gain, offset, group_id)) + sig_channels.append((name, chan_id, chan_info['sampling_rate'], + sig_dtype, chan_info['units'], chan_info['gain'], + chan_info['offset'], chan_info['signal_type'])) sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype) + + self._max_sampling_rate = np.max(sig_channels['sampling_rate']) + self._max_sigs_length = self._raw_data.size * self.block_size # No events event_channels = [] @@ -99,11 +99,16 @@ def _segment_t_start(self, block_index, seg_index): return 0. 
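    # A hedged usage sketch for the IntanIO wrapper registered in this patch; the file
    # name is a placeholder and the call is only roughly what BaseFromRaw exposes:
    #
    #     from neo.io import IntanIO
    #     reader = IntanIO(filename='example.rhd')
    #     block = reader.read_block(signal_group_mode='group-by-same-units')
    #
    # Because signal_type is used as the channel group id above, channels sharing units
    # and sampling rate are expected to end up in the same AnalogSignal.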
def _segment_t_stop(self, block_index, seg_index): - t_stop = self._sigs_length / self._sampling_rate + t_stop = self._max_sigs_length / self._max_sampling_rate return t_stop def _get_signal_size(self, block_index, seg_index, channel_indexes): - return self._sigs_length + assert channel_indexes is not None, 'channel_indexes cannot be None, several signal size' + assert np.unique(self.header['signal_channels'][channel_indexes]['group_id']).size == 1 + channel_names = self.header['signal_channels'][channel_indexes]['name'] + chan_name = channel_names[0] + size = self._raw_data[chan_name].size + return size def _get_signal_t_start(self, block_index, seg_index, channel_indexes): return 0. @@ -113,7 +118,7 @@ def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, chann if i_start is None: i_start = 0 if i_stop is None: - i_stop = self._sigs_length + i_stop = self._get_signal_size(block_index, seg_index, channel_indexes) block_start = i_start // self.block_size block_stop = i_stop // self.block_size + 1 @@ -248,6 +253,8 @@ def read_rhs(filename): header_size = f.tell() + sr = global_info['sampling_rate'] + # construct dtype by re-ordering channels by types ordered_channels = [] data_dtype = [('timestamp', 'int32', BLOCK_SIZE)] @@ -255,6 +262,10 @@ def read_rhs(filename): # 0: RHS2000 amplifier channel. for chan_info in channels_by_type[0]: name = chan_info['native_channel_name'] + chan_info['sampling_rate'] = sr + chan_info['units'] = 'uV' + chan_info['gain'] = 0.195 + chan_info['offset'] = -32768 * 0.195 ordered_channels.append(chan_info) data_dtype +=[(name, 'uint16', BLOCK_SIZE)] @@ -263,6 +274,11 @@ def read_rhs(filename): name = chan_info['native_channel_name'] chan_info_dc = dict(chan_info) chan_info_dc['native_channel_name'] = name+'_DC' + chan_info_dc['sampling_rate'] = sr + chan_info_dc['units'] = 'mV' + chan_info_dc['gain'] = 19.23 + chan_info_dc['offset'] = -512 * 19.23 + chan_info_dc['signal_type'] = 10 # put it in another group ordered_channels.append(chan_info_dc) data_dtype +=[(name+'_DC', 'uint16', BLOCK_SIZE)] @@ -270,6 +286,13 @@ def read_rhs(filename): name = chan_info['native_channel_name'] chan_info_stim = dict(chan_info) chan_info_stim['native_channel_name'] = name+'_STIM' + chan_info_stim['sampling_rate'] = sr + # stim channel are coplicated because they are coded + # with bits, they do not fit the gain/offset rawio strategy + chan_info_stim['units'] = '' + chan_info_stim['gain'] = 1. + chan_info_stim['offset'] = 0. + chan_info_stim['signal_type'] = 11 # put it in another group ordered_channels.append(chan_info_stim) data_dtype +=[(name+'_STIM', 'uint16', BLOCK_SIZE)] @@ -278,12 +301,18 @@ def read_rhs(filename): for sig_type in [3, 4, ]: for chan_info in channels_by_type[sig_type]: name = chan_info['native_channel_name'] + chan_info['sampling_rate'] = sr + chan_info['units'] = 'V' + chan_info['gain'] = 0.0003125 + chan_info['offset'] = -32768 * 0.0003125 ordered_channels.append(chan_info) data_dtype +=[(name, 'uint16', BLOCK_SIZE)] # 5: Digital input channel. # 6: Digital output channel. 
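        # (descriptive note: all digital lines of one direction are stored together as a
        # single uint16 word per sample, presumably one bit per line, which is why a
        # single 'DIGITAL-IN'/'DIGITAL-OUT' field is added below regardless of how many
        # digital channels are enabled)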
for sig_type in [5, 6]: + # at the moment theses channel are not in sig channel list + # but they are in the raw memamp if len(channels_by_type[sig_type]) > 0: name = {5:'DIGITAL-IN', 6:'DIGITAL-OUT' }[sig_type] data_dtype +=[(name, 'uint16', BLOCK_SIZE)] @@ -411,10 +440,9 @@ def read_rhd(filename): header_size = f.tell() - # TODO sampling_rate for auxilary channel - - # construct the data block dtype + sr = global_info['sampling_rate'] + # construct the data block dtype and reorder channels if version>='2.0': BLOCK_SIZE = 128 else: @@ -430,35 +458,70 @@ def read_rhd(filename): # 0: RHD2000 amplifier channel for chan_info in channels_by_type[0]: name = chan_info['native_channel_name'] + chan_info['sampling_rate'] = sr + chan_info['units'] = 'uV' + chan_info['gain'] = 0.195 + chan_info['offset'] = -32768 * 0.195 ordered_channels.append(chan_info) data_dtype +=[(name, 'uint16', BLOCK_SIZE)] # 1: RHD2000 auxiliary input channel for chan_info in channels_by_type[1]: name = chan_info['native_channel_name'] + chan_info['sampling_rate'] = sr / 4. + chan_info['units'] = 'V' + chan_info['gain'] = 0.0000374 + chan_info['offset'] = 0. ordered_channels.append(chan_info) data_dtype +=[(name, 'uint16', BLOCK_SIZE//4)] # 2: RHD2000 supply voltage channel for chan_info in channels_by_type[2]: name = chan_info['native_channel_name'] + chan_info['sampling_rate'] = sr / BLOCK_SIZE + chan_info['units'] = 'V' + chan_info['gain'] = 0.0000748 + chan_info['offset'] = 0. ordered_channels.append(chan_info) data_dtype +=[('supply_voltage', 'uint16')] + # temperature is not an official channel in the header for i in range(global_info['num_temp_sensor_channels']): - data_dtype +=[('temperature_{}'.format(i), 'uint16')] + chan_info = {'native_channel_name' : 'temperature', 'signal_type': 20 } + chan_info['sampling_rate'] = sr / BLOCK_SIZE + chan_info['units'] = 'Celsius' + chan_info['gain'] = 0.001 + chan_info['offset'] = 0. + ordered_channels.append(chan_info) + data_dtype +=[('temperature_{}'.format(i), 'int16')] + # 3: USB board ADC input channel for chan_info in channels_by_type[3]: name = chan_info['native_channel_name'] + chan_info['sampling_rate'] = sr + chan_info['units'] = 'V' + if global_info['eval_board_mode'] == 0: + chan_info['gain'] = 0.000050354 + chan_info['offset'] = 0. 
+ elif global_info['eval_board_mode'] == 1: + chan_info['gain'] = 0.00015259 + chan_info['offset'] = -32768 * 0.00015259 + elif global_info['eval_board_mode'] == 13: + chan_info['gain'] = 0.0003125 + chan_info['offset'] = -32768 * 0.0003125 ordered_channels.append(chan_info) data_dtype +=[(name, 'uint16', BLOCK_SIZE)] # 4: USB board digital input channel # 5: USB board digital output channel for sig_type in [4, 5]: + # at the moment theses channel are not in sig channel list + # but they are in the raw memamp if len(channels_by_type[sig_type]) > 0: name = {4:'DIGITAL-IN', 5:'DIGITAL-OUT' }[sig_type] data_dtype +=[(name, 'uint16', BLOCK_SIZE)] return global_info, ordered_channels, data_dtype, header_size, BLOCK_SIZE + + diff --git a/neo/test/iotest/test_intanio.py b/neo/test/iotest/test_intanio.py new file mode 100644 index 000000000..036d19bc5 --- /dev/null +++ b/neo/test/iotest/test_intanio.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +""" +Tests of neo.io.intanio +""" + +# needed for python 3 compatibility +from __future__ import absolute_import, division + +import sys + +import unittest + +from neo.io import IntanIO +from neo.test.iotest.common_io_test import BaseTestIO + + +class TestIntanIO(BaseTestIO, unittest.TestCase, ): + ioclass = IntanIO + files_to_test = [] + files_to_download = [ + ] + + +if __name__ == "__main__": + unittest.main() From c64a00f26c28b317f64986ac4a7ec786df7e9fc9 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 7 Nov 2018 11:23:22 +0100 Subject: [PATCH 19/41] pep8 clean --- neo/io/intanio.py | 1 + neo/rawio/intanrawio.py | 186 +++++++++++++++++++--------------------- 2 files changed, 90 insertions(+), 97 deletions(-) diff --git a/neo/io/intanio.py b/neo/io/intanio.py index f7de06f48..3ec3a829a 100644 --- a/neo/io/intanio.py +++ b/neo/io/intanio.py @@ -7,6 +7,7 @@ class IntanIO(IntanRawIO, BaseFromRaw): __doc__ = IntanRawIO.__doc__ _prefered_signal_group_mode = 'group-by-same-units' + def __init__(self, filename): IntanRawIO.__init__(self, filename=filename) BaseFromRaw.__init__(self, filename) diff --git a/neo/rawio/intanrawio.py b/neo/rawio/intanrawio.py index 4f0d73f17..c1e6dc667 100644 --- a/neo/rawio/intanrawio.py +++ b/neo/rawio/intanrawio.py @@ -28,8 +28,6 @@ from distutils.version import LooseVersion as V - - class IntanRawIO(BaseRawIO): """ @@ -45,24 +43,26 @@ def _source_name(self): return self.filename def _parse_header(self): - + if self.filename.endswith('.rhs'): - self._global_info, self._ordered_channels, data_dtype, header_size, self.block_size = read_rhs(self.filename) + self._global_info, self._ordered_channels, data_dtype,\ + header_size, self.block_size = read_rhs(self.filename) elif self.filename.endswith('.rhd'): - self._global_info, self._ordered_channels, data_dtype, header_size, self.block_size = read_rhd(self.filename) + self._global_info, self._ordered_channels, data_dtype,\ + header_size, self.block_size = read_rhd(self.filename) # memmap raw data with the complicated structured dtype self._raw_data = np.memmap(self.filename, dtype=data_dtype, mode='r', offset=header_size) - + # check timestamp continuity timestamp = self._raw_data['timestamp'].flatten() - assert np.all(np.diff(timestamp)==1) - + assert np.all(np.diff(timestamp) == 1), 'timestamp have gaps' + # signals sig_channels = [] for c, chan_info in enumerate(self._ordered_channels): name = chan_info['native_channel_name'] - chan_id = c # the chan_id have no meaning in intan + chan_id = c # the chan_id have no meaning in intan if chan_info['signal_type'] == 20: # 
exception for temperature sig_dtype = 'int16' @@ -73,7 +73,7 @@ def _parse_header(self): sig_dtype, chan_info['units'], chan_info['gain'], chan_info['offset'], chan_info['signal_type'])) sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype) - + self._max_sampling_rate = np.max(sig_channels['sampling_rate']) self._max_sigs_length = self._raw_data.size * self.block_size @@ -137,7 +137,6 @@ def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, chann return sigs_chunk - def read_qstring(f): length = np.fromfile(f, dtype='uint32', count=1)[0] if length == 0xFFFFFFFF or length == 0: @@ -145,7 +144,7 @@ def read_qstring(f): txt = f.read(length).decode('utf-16') return txt - + def read_variable_header(f, header): info = {} for field_name, field_type in header: @@ -157,21 +156,19 @@ def read_variable_header(f, header): return info - - ############### # RHS ZONE -rhs_global_header =[ +rhs_global_header = [ ('magic_number', 'uint32'), # 0xD69127AC - + ('major_version', 'int16'), ('minor_version', 'int16'), - + ('sampling_rate', 'float32'), - + ('dsp_enabled', 'int16'), - + ('actual_dsp_cutoff_frequency', 'float32'), ('actual_lower_bandwidth', 'float32'), ('actual_lower_settle_bandwidth', 'float32'), @@ -180,15 +177,15 @@ def read_variable_header(f, header): ('desired_lower_bandwidth', 'float32'), ('desired_lower_settle_bandwidth', 'float32'), ('desired_upper_bandwidth', 'float32'), - + ('notch_filter_mode', 'int16'), - + ('desired_impedance_test_frequency', 'float32'), ('actual_impedance_test_frequency', 'float32'), - + ('amp_settle_mode', 'int16'), ('charge_recovery_mode', 'int16'), - + ('stim_step_size', 'float32'), ('recovery_current_limit', 'float32'), ('recovery_target_voltage', 'float32'), @@ -196,13 +193,13 @@ def read_variable_header(f, header): ('note1', 'QString'), ('note2', 'QString'), ('note3', 'QString'), - + ('dc_amplifier_data_saved', 'int16'), - + ('board_mode', 'int16'), - + ('ref_channel_name', 'QString'), - + ('nb_signal_group', 'int16'), ] @@ -222,7 +219,7 @@ def read_variable_header(f, header): ('signal_type', 'int16'), ('channel_enabled', 'int16'), ('chip_channel_num', 'int16'), - ('command_stream', 'int16'), ####### + ('command_stream', 'int16'), ('board_stream_num', 'int16'), ('spike_scope_trigger_mode', 'int16'), ('spike_scope_voltage_thresh', 'int16'), @@ -230,35 +227,34 @@ def read_variable_header(f, header): ('spike_scope_digital_edge_polarity', 'int16'), ('electrode_impedance_magnitude', 'float32'), ('electrode_impedance_phase', 'float32'), - ] def read_rhs(filename): - BLOCK_SIZE = 128 # sample per block + BLOCK_SIZE = 128 # sample per block with open(filename, mode='rb') as f: global_info = read_variable_header(f, rhs_global_header) - - channels_by_type = {k:[] for k in [0,3, 4, 5, 6]} + + channels_by_type = {k: [] for k in [0, 3, 4, 5, 6]} for g in range(global_info['nb_signal_group']): group_info = read_variable_header(f, rhs_signal_group_header) - + if bool(group_info['signal_group_enabled']): for c in range(group_info['channel_num']): chan_info = read_variable_header(f, rhs_signal_channel_header) assert chan_info['signal_type'] not in (1, 2) if bool(chan_info['channel_enabled']): channels_by_type[chan_info['signal_type']].append(chan_info) - + header_size = f.tell() - + sr = global_info['sampling_rate'] - + # construct dtype by re-ordering channels by types ordered_channels = [] data_dtype = [('timestamp', 'int32', BLOCK_SIZE)] - + # 0: RHS2000 amplifier channel. 
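        # (descriptive note: for these amplifier channels the 0.195 gain below is the step
        # size in microvolts per 16-bit code, and the -32768 * 0.195 offset recentres the
        # unsigned integer range on zero)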
for chan_info in channels_by_type[0]: name = chan_info['native_channel_name'] @@ -267,8 +263,8 @@ def read_rhs(filename): chan_info['gain'] = 0.195 chan_info['offset'] = -32768 * 0.195 ordered_channels.append(chan_info) - data_dtype +=[(name, 'uint16', BLOCK_SIZE)] - + data_dtype += [(name, 'uint16', BLOCK_SIZE)] + if bool(global_info['dc_amplifier_data_saved']): for chan_info in channels_by_type[0]: name = chan_info['native_channel_name'] @@ -278,9 +274,9 @@ def read_rhs(filename): chan_info_dc['units'] = 'mV' chan_info_dc['gain'] = 19.23 chan_info_dc['offset'] = -512 * 19.23 - chan_info_dc['signal_type'] = 10 # put it in another group + chan_info_dc['signal_type'] = 10 # put it in another group ordered_channels.append(chan_info_dc) - data_dtype +=[(name+'_DC', 'uint16', BLOCK_SIZE)] + data_dtype += [(name+'_DC', 'uint16', BLOCK_SIZE)] for chan_info in channels_by_type[0]: name = chan_info['native_channel_name'] @@ -292,9 +288,9 @@ def read_rhs(filename): chan_info_stim['units'] = '' chan_info_stim['gain'] = 1. chan_info_stim['offset'] = 0. - chan_info_stim['signal_type'] = 11 # put it in another group + chan_info_stim['signal_type'] = 11 # put it in another group ordered_channels.append(chan_info_stim) - data_dtype +=[(name+'_STIM', 'uint16', BLOCK_SIZE)] + data_dtype += [(name+'_STIM', 'uint16', BLOCK_SIZE)] # 3: Analog input channel. # 4: Analog output channel. @@ -306,7 +302,7 @@ def read_rhs(filename): chan_info['gain'] = 0.0003125 chan_info['offset'] = -32768 * 0.0003125 ordered_channels.append(chan_info) - data_dtype +=[(name, 'uint16', BLOCK_SIZE)] + data_dtype += [(name, 'uint16', BLOCK_SIZE)] # 5: Digital input channel. # 6: Digital output channel. @@ -314,44 +310,44 @@ def read_rhs(filename): # at the moment theses channel are not in sig channel list # but they are in the raw memamp if len(channels_by_type[sig_type]) > 0: - name = {5:'DIGITAL-IN', 6:'DIGITAL-OUT' }[sig_type] - data_dtype +=[(name, 'uint16', BLOCK_SIZE)] - + name = {5: 'DIGITAL-IN', 6: 'DIGITAL-OUT'}[sig_type] + data_dtype += [(name, 'uint16', BLOCK_SIZE)] + return global_info, ordered_channels, data_dtype, header_size, BLOCK_SIZE ############### # RHD ZONE -rhd_global_header_base =[ - ('magic_number', 'uint32'), # 0xC6912702 +rhd_global_header_base = [ + ('magic_number', 'uint32'), # 0xC6912702 ('major_version', 'int16'), ('minor_version', 'int16'), ] -rhd_global_header_part1 =[ +rhd_global_header_part1 = [ ('sampling_rate', 'float32'), - + ('dsp_enabled', 'int16'), - + ('actual_dsp_cutoff_frequency', 'float32'), ('actual_lower_bandwidth', 'float32'), ('actual_upper_bandwidth', 'float32'), ('desired_dsp_cutoff_frequency', 'float32'), ('desired_lower_bandwidth', 'float32'), ('desired_upper_bandwidth', 'float32'), - + ('notch_filter_mode', 'int16'), - + ('desired_impedance_test_frequency', 'float32'), ('actual_impedance_test_frequency', 'float32'), - + ('note1', 'QString'), ('note2', 'QString'), ('note3', 'QString'), - - ('nb_temp_sensor', 'int16'), + + ('nb_temp_sensor', 'int16'), ] rhd_global_header_v11 = [ @@ -393,22 +389,21 @@ def read_rhs(filename): ('spike_scope_digital_edge_polarity', 'int16'), ('electrode_impedance_magnitude', 'float32'), ('electrode_impedance_phase', 'float32'), - ] def read_rhd(filename): with open(filename, mode='rb') as f: - + global_info = read_variable_header(f, rhd_global_header_base) - + version = V('{major_version}.{minor_version}'.format(global_info)) - print(version) - + # print(version) + # the header size depend on the version :-( - header = list(rhd_global_header_part1) # 
make a copy - - if version>='1.1': + header = list(rhd_global_header_part1) # make a copy + + if version >= '1.1': header = header + rhd_global_header_v11 else: global_info['num_temp_sensor_channels'] = 0 @@ -417,21 +412,21 @@ def read_rhd(filename): header = header + rhd_global_header_v13 else: global_info['eval_board_mode'] = 0 - + if version >= '2.0': header = header + rhd_global_header_v20 else: global_info['reference_channel'] = '' - + header = header + rhd_global_header_final - + global_info.update(read_variable_header(f, header)) - + # read channel group and channel header - channels_by_type = {k:[] for k in [0, 1, 2, 3, 4, 5,]} + channels_by_type = {k: [] for k in [0, 1, 2, 3, 4, 5]} for g in range(global_info['nb_signal_group']): group_info = read_variable_header(f, rhd_signal_group_header) - + if bool(group_info['signal_group_enabled']): for c in range(group_info['channel_num']): chan_info = read_variable_header(f, rhd_signal_channel_header) @@ -439,18 +434,18 @@ def read_rhd(filename): channels_by_type[chan_info['signal_type']].append(chan_info) header_size = f.tell() - + sr = global_info['sampling_rate'] - + # construct the data block dtype and reorder channels - if version>='2.0': + if version >= '2.0': BLOCK_SIZE = 128 else: - BLOCK_SIZE = 60 # 256 channels + BLOCK_SIZE = 60 # 256 channels ordered_channels = [] - - if version>='1.2': + + if version >= '1.2': data_dtype = [('timestamp', 'int32', BLOCK_SIZE)] else: data_dtype = [('timestamp', 'uint32', BLOCK_SIZE)] @@ -463,8 +458,8 @@ def read_rhd(filename): chan_info['gain'] = 0.195 chan_info['offset'] = -32768 * 0.195 ordered_channels.append(chan_info) - data_dtype +=[(name, 'uint16', BLOCK_SIZE)] - + data_dtype += [(name, 'uint16', BLOCK_SIZE)] + # 1: RHD2000 auxiliary input channel for chan_info in channels_by_type[1]: name = chan_info['native_channel_name'] @@ -473,29 +468,28 @@ def read_rhd(filename): chan_info['gain'] = 0.0000374 chan_info['offset'] = 0. ordered_channels.append(chan_info) - data_dtype +=[(name, 'uint16', BLOCK_SIZE//4)] - + data_dtype += [(name, 'uint16', BLOCK_SIZE//4)] + # 2: RHD2000 supply voltage channel for chan_info in channels_by_type[2]: name = chan_info['native_channel_name'] chan_info['sampling_rate'] = sr / BLOCK_SIZE chan_info['units'] = 'V' - chan_info['gain'] = 0.0000748 + chan_info['gain'] = 0.0000748 chan_info['offset'] = 0. ordered_channels.append(chan_info) - data_dtype +=[('supply_voltage', 'uint16')] - + data_dtype += [('supply_voltage', 'uint16')] + # temperature is not an official channel in the header for i in range(global_info['num_temp_sensor_channels']): - chan_info = {'native_channel_name' : 'temperature', 'signal_type': 20 } + chan_info = {'native_channel_name': 'temperature', 'signal_type': 20} chan_info['sampling_rate'] = sr / BLOCK_SIZE chan_info['units'] = 'Celsius' - chan_info['gain'] = 0.001 + chan_info['gain'] = 0.001 chan_info['offset'] = 0. ordered_channels.append(chan_info) - data_dtype +=[('temperature_{}'.format(i), 'int16')] - - + data_dtype += [('temperature_{}'.format(i), 'int16')] + # 3: USB board ADC input channel for chan_info in channels_by_type[3]: name = chan_info['native_channel_name'] @@ -506,22 +500,20 @@ def read_rhd(filename): chan_info['offset'] = 0. 
elif global_info['eval_board_mode'] == 1: chan_info['gain'] = 0.00015259 - chan_info['offset'] = -32768 * 0.00015259 + chan_info['offset'] = -32768 * 0.00015259 elif global_info['eval_board_mode'] == 13: chan_info['gain'] = 0.0003125 chan_info['offset'] = -32768 * 0.0003125 ordered_channels.append(chan_info) - data_dtype +=[(name, 'uint16', BLOCK_SIZE)] - + data_dtype += [(name, 'uint16', BLOCK_SIZE)] + # 4: USB board digital input channel # 5: USB board digital output channel for sig_type in [4, 5]: # at the moment theses channel are not in sig channel list # but they are in the raw memamp if len(channels_by_type[sig_type]) > 0: - name = {4:'DIGITAL-IN', 5:'DIGITAL-OUT' }[sig_type] - data_dtype +=[(name, 'uint16', BLOCK_SIZE)] - - return global_info, ordered_channels, data_dtype, header_size, BLOCK_SIZE - + name = {4: 'DIGITAL-IN', 5: 'DIGITAL-OUT'}[sig_type] + data_dtype += [(name, 'uint16', BLOCK_SIZE)] + return global_info, ordered_channels, data_dtype, header_size, BLOCK_SIZE From c3832c412e5fd94f71dec457042d3ad6770b8452 Mon Sep 17 00:00:00 2001 From: Andrew Davison Date: Thu, 15 Nov 2018 15:43:27 +0100 Subject: [PATCH 20/41] Docstring fixes --- neo/rawio/openephysrawio.py | 55 +++++++++++++++++++------------------ 1 file changed, 28 insertions(+), 27 deletions(-) diff --git a/neo/rawio/openephysrawio.py b/neo/rawio/openephysrawio.py index 13993f7cc..c529c58a0 100644 --- a/neo/rawio/openephysrawio.py +++ b/neo/rawio/openephysrawio.py @@ -20,41 +20,42 @@ class OpenEphysRawIO(BaseRawIO): """ - OpenEphys GUI software offer several data format see + OpenEphys GUI software offers several data formats, see https://open-ephys.atlassian.net/wiki/spaces/OEW/pages/491632/Data+format - This class implement the legacy OpenEphys format here + This class implements the legacy OpenEphys format here https://open-ephys.atlassian.net/wiki/spaces/OEW/pages/65667092/Open+Ephys+format - OpenEphy group already propose some tools here: + The OpenEphys group already proposes some tools here: https://github.com/open-ephys/analysis-tools/blob/master/OpenEphys.py - but there is no package at pypi and read everything in memory. + but (i) there is no package at PyPI and (ii) those tools read everything in memory. - Its directory based with several files : + The format is directory based with several files: * .continuous * .events * .spikes - This implementation of class is based on: + This implementation is based on: * this code https://github.com/open-ephys/analysis-tools/blob/master/Python3/OpenEphys.py - done by Dan Denman and Josh Siegle - * a previous PR done by Cristian Tatarau and Charite Berlin - Contrary to previous code to open this format here all data use memmap so it should + written by Dan Denman and Josh Siegle + * a previous PR by Cristian Tatarau at Charité Berlin + + In contrast to previous code for reading this format, here all data use memmap so it should be super fast and light compared to legacy code. When the acquisition is stopped and restarted then files are named *_2, *_3. - In that case this class create a new Segment. Note that timestamps is reseted in this situation. + In that case this class creates a new Segment. Note that timestamps are reset in this situation. Limitation : - * Work only if all continuous channels have the same samplerate. Wich is a resonnable hypothesis. - * When the recording is stopped and restarted all continuous files will contains gaps. - Ideally this would lead to a new Segment but it is not implemented due to complexity. 
- In that case it will raise an error. + * Works only if all continuous channels have the same sampling rate, which is a reasonable hypothesis. + * When the recording is stopped and restarted all continuous files will contain gaps. + Ideally this would lead to a new Segment but this use case is not implemented due to its complexity. + Instead it will raise an error. Special cases: - * Normaly all continuous files have the same first timestamp and length. In situation - where it is not the case all files are clip to the smallest one so that they are all aligned. - In that case a wrning is emited. + * Normaly all continuous files have the same first timestamp and length. In situations + where it is not the case all files are clipped to the smallest one so that they are all aligned, + and a warning is emitted. """ extensions = [] rawmode = 'one-dir' @@ -104,7 +105,7 @@ def _parse_header(self): # check for continuity (no gaps) diff = np.diff(data_chan['timestamp']) assert np.all(diff == RECORD_SIZE), \ - 'Not continuous timestamps for {}. Maybe because recording is pause/stop.'.format(continuous_filename) + 'Not continuous timestamps for {}. Maybe because recording was paused/stopped.'.format(continuous_filename) if seg_index == 0: # add in channel list @@ -117,7 +118,7 @@ def _parse_header(self): if not all(all_sigs_length[0] == e for e in all_sigs_length) or\ not all(all_first_timestamps[0] == e for e in all_first_timestamps): - self.logger.warning('Continuous files are not timestamps aligned. So there clip then to aligned') + self.logger.warning('Continuous files do not have aligned timestamps; clipping to make them aligned.') first, last = -np.inf, np.inf for chan_id in self._sigs_memmap[seg_index]: @@ -187,7 +188,7 @@ def _parse_header(self): self._spike_sampling_rate = spike_info['sampleRate'] else: assert self._spike_sampling_rate == spike_info['sampleRate'],\ - 'mismatch in spike sampleRate' + 'mismatch in spike sampling rate' # scan all to detect several all unique(sorted_ids) all_sorted_ids = [] @@ -383,10 +384,10 @@ def _rescale_epoch_duration(self, raw_duration, dtype): def make_spikes_dtype(filename): """ - Given the spike file make the appropriate dtype that depend of: - * N number of channel - * M sample per spike - See doc of file format. + Given the spike file make the appropriate dtype that depends on: + * N - number of channels + * M - samples per spike + See documentation of file format. """ # strangly the header do not have the sample size @@ -420,10 +421,10 @@ def make_spikes_dtype(filename): def explore_folder(dirname): """ - This explore a folder and disptach coninuous, event and spikes + This explores a folder and dispatch coninuous, event and spikes files by segment (aka recording session). - The nb of segment is check with this rules + The number of segments is checked with these rules "100_CH0.continuous" ---> seg_index 0 "100_CH0_2.continuous" ---> seg_index 1 "100_CH0_N.continuous" ---> seg_index N-1 @@ -487,7 +488,7 @@ def explore_folder(dirname): def read_file_header(filename): """Read header information from the first 1024 bytes of an OpenEphys file. - See doc. + See docs. 
""" header = {} with open(filename, mode='rb') as f: From 5377c3a4754653e943f14df58b234b53ad577d8d Mon Sep 17 00:00:00 2001 From: Andrew Davison Date: Thu, 15 Nov 2018 16:13:44 +0100 Subject: [PATCH 21/41] Docstring fixes --- neo/rawio/rawmcsrawio.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/neo/rawio/rawmcsrawio.py b/neo/rawio/rawmcsrawio.py index 6bd5ba758..063e08f6e 100644 --- a/neo/rawio/rawmcsrawio.py +++ b/neo/rawio/rawmcsrawio.py @@ -3,15 +3,14 @@ Class for reading data from "Raw" Multi Channel System (MCS) format. This format is NOT the native MCS format (*.mcd). This format is a raw format with an internal binary header exported by the -"MC_DataTool binary conversion" with the option header slected. +"MC_DataTool binary conversion" with the option header selected. -The internal header contain sampling rate, channel names, gain and units. -Not so bad : everything that neo need, so this IO is without parameters. - -If some MCS custumers read this you should lobby to get the real specification -of the real MCS format (.mcd) and so the MCSRawIO could be done instead of this -ersatz. +The internal header contains sampling rate, channel names, gain and units. +Not so bad: everything that Neo needs, so this IO is without parameters. +If some MCS customers read this you should lobby to get the real specification +of the real MCS format (.mcd), then an IO module for the native MCS format +could be written instead of this ersatz. Author: Samuel Garcia """ @@ -98,7 +97,8 @@ def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, chann def parse_mcs_raw_header(filename): """ - This is a mix with stuff on github. + This is a from-scratch implementation, with some inspiration + (but no code) taken from the following files: https://github.com/spyking-circus/spyking-circus/blob/master/circus/files/mcs_raw_binary.py https://github.com/jeffalstott/Multi-Channel-Systems-Import/blob/master/MCS.py """ From 36eea92510ba4d28331722a8f3395d6a7ce2243a Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 15 Nov 2018 16:37:16 +0100 Subject: [PATCH 22/41] Debug with rhd and files at gin. 
--- neo/rawio/intanrawio.py | 41 +++++++++++++++++++----------- neo/rawio/tests/test_intanrawio.py | 2 ++ neo/test/iotest/test_intanio.py | 6 +++-- 3 files changed, 32 insertions(+), 17 deletions(-) diff --git a/neo/rawio/intanrawio.py b/neo/rawio/intanrawio.py index c1e6dc667..570abb859 100644 --- a/neo/rawio/intanrawio.py +++ b/neo/rawio/intanrawio.py @@ -37,6 +37,7 @@ class IntanRawIO(BaseRawIO): def __init__(self, filename=''): BaseRawIO.__init__(self) + self.filename = filename def _source_name(self): @@ -46,10 +47,10 @@ def _parse_header(self): if self.filename.endswith('.rhs'): self._global_info, self._ordered_channels, data_dtype,\ - header_size, self.block_size = read_rhs(self.filename) + header_size, self._block_size = read_rhs(self.filename) elif self.filename.endswith('.rhd'): self._global_info, self._ordered_channels, data_dtype,\ - header_size, self.block_size = read_rhd(self.filename) + header_size, self._block_size = read_rhd(self.filename) # memmap raw data with the complicated structured dtype self._raw_data = np.memmap(self.filename, dtype=data_dtype, mode='r', offset=header_size) @@ -75,7 +76,7 @@ def _parse_header(self): sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype) self._max_sampling_rate = np.max(sig_channels['sampling_rate']) - self._max_sigs_length = self._raw_data.size * self.block_size + self._max_sigs_length = self._raw_data.size * self._block_size # No events event_channels = [] @@ -120,19 +121,30 @@ def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, chann if i_stop is None: i_stop = self._get_signal_size(block_index, seg_index, channel_indexes) - block_start = i_start // self.block_size - block_stop = i_stop // self.block_size + 1 - sl0 = i_start % self.block_size - sl1 = sl0 + (i_stop - i_start) - if channel_indexes is None: channel_indexes = slice(None) channel_names = self.header['signal_channels'][channel_indexes]['name'] + shape = self._raw_data[channel_names[0]].shape + + # some channel (temperature) have 1D field so shape 1D + # because 1 sample per block + if len(shape) == 2: + # this is the general case with 2D + block_size = shape[1] + block_start = i_start // block_size + block_stop = i_stop // block_size + 1 + + sl0 = i_start % block_size + sl1 = sl0 + (i_stop - i_start) + sigs_chunk = np.zeros((i_stop - i_start, len(channel_names)), dtype='uint16') for i, chan_name in enumerate(channel_names): data_chan = self._raw_data[chan_name] - sigs_chunk[:, i] = data_chan[block_start:block_stop].flatten()[sl0:sl1] + if len(shape) == 1: + sigs_chunk[:, i] = data_chan[i_start:i_stop] + else: + sigs_chunk[:, i] = data_chan[block_start:block_stop].flatten()[sl0:sl1] return sigs_chunk @@ -347,7 +359,6 @@ def read_rhs(filename): ('note2', 'QString'), ('note3', 'QString'), - ('nb_temp_sensor', 'int16'), ] rhd_global_header_v11 = [ @@ -397,8 +408,7 @@ def read_rhd(filename): global_info = read_variable_header(f, rhd_global_header_base) - version = V('{major_version}.{minor_version}'.format(global_info)) - # print(version) + version = V('{major_version}.{minor_version}'.format(**global_info)) # the header size depend on the version :-( header = list(rhd_global_header_part1) # make a copy @@ -478,17 +488,18 @@ def read_rhd(filename): chan_info['gain'] = 0.0000748 chan_info['offset'] = 0. 
ordered_channels.append(chan_info) - data_dtype += [('supply_voltage', 'uint16')] + data_dtype += [(name, 'uint16')] # temperature is not an official channel in the header for i in range(global_info['num_temp_sensor_channels']): - chan_info = {'native_channel_name': 'temperature', 'signal_type': 20} + name = 'temperature_{}'.format(i) + chan_info = {'native_channel_name': name, 'signal_type': 20} chan_info['sampling_rate'] = sr / BLOCK_SIZE chan_info['units'] = 'Celsius' chan_info['gain'] = 0.001 chan_info['offset'] = 0. ordered_channels.append(chan_info) - data_dtype += [('temperature_{}'.format(i), 'int16')] + data_dtype += [(name, 'int16')] # 3: USB board ADC input channel for chan_info in channels_by_type[3]: diff --git a/neo/rawio/tests/test_intanrawio.py b/neo/rawio/tests/test_intanrawio.py index ce99a7b73..cb11989fa 100644 --- a/neo/rawio/tests/test_intanrawio.py +++ b/neo/rawio/tests/test_intanrawio.py @@ -13,6 +13,8 @@ class TestIntanRawIO(BaseTestRawIO, unittest.TestCase, ): rawioclass = IntanRawIO files_to_download = [ + 'intan_rhs_test_1.rhs', + 'intan_rhd_test_1.rhd', ] entities_to_test = files_to_download diff --git a/neo/test/iotest/test_intanio.py b/neo/test/iotest/test_intanio.py index 036d19bc5..948bade1e 100644 --- a/neo/test/iotest/test_intanio.py +++ b/neo/test/iotest/test_intanio.py @@ -16,9 +16,11 @@ class TestIntanIO(BaseTestIO, unittest.TestCase, ): ioclass = IntanIO - files_to_test = [] files_to_download = [ - ] + 'intan_rhs_test_1.rhs', + 'intan_rhd_test_1.rhd', + ] + files_to_test = files_to_download if __name__ == "__main__": From 3801df9b3be53500d45670024ff8df261a67c3b4 Mon Sep 17 00:00:00 2001 From: Michael Denker Date: Thu, 15 Nov 2018 17:30:02 +0100 Subject: [PATCH 23/41] Fixed a string to be 'b' so that the decode() function works in Python3 --- neo/io/blackrockio_v4.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neo/io/blackrockio_v4.py b/neo/io/blackrockio_v4.py index 64331df42..97faea687 100644 --- a/neo/io/blackrockio_v4.py +++ b/neo/io/blackrockio_v4.py @@ -1267,7 +1267,7 @@ def __get_nsx_param_variant_a(self, param_name, nsx_nb): nsx_parameters = { 'labels': labels, 'units': np.array( - ['uV'] * + [b'uV'] * self.__nsx_basic_header[nsx_nb]['channel_count']), 'min_analog_val': -1 * np.array(dig_factor), 'max_analog_val': np.array(dig_factor), From 05d47fa97a79fa7279b24f22b005bb881106aef0 Mon Sep 17 00:00:00 2001 From: Michael Denker Date: Thu, 15 Nov 2018 17:38:45 +0100 Subject: [PATCH 24/41] Fixed a spelling mistake in the test doku --- neo/test/iotest/test_blackrockio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neo/test/iotest/test_blackrockio.py b/neo/test/iotest/test_blackrockio.py index 68ec51e42..57249de84 100644 --- a/neo/test/iotest/test_blackrockio.py +++ b/neo/test/iotest/test_blackrockio.py @@ -191,7 +191,7 @@ def test_load_muliple_nsx(self): @unittest.skipUnless(HAVE_SCIPY, "requires scipy") def test_compare_blackrockio_with_matlabloader_v21(self): """ - This test compares the output of ReachGraspIO.read_block() with the + This test compares the output of BlackrockIO.read_block() with the output generated by a Matlab implementation of a Blackrock file reader provided by the company. The output for comparison is provided in a .mat file created by the script create_data_matlab_blackrock.m. 
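As an aside on the b'uV' change in blackrockio_v4.py above, a short illustrative sketch (not part of any patch) of why the bytes literal matters once the code runs under Python 3:

    import numpy as np

    # With a bytes literal numpy stores '|S2' items, whose elements provide
    # .decode() on both Python 2 and Python 3.
    units = np.array([b'uV'] * 3)
    print(units[0].decode())     # 'uV'

    # With a plain str literal under Python 3 the elements are numpy.str_
    # (unicode) and have no .decode() method, so the downstream decode()
    # call fails -- the AttributeError the patch above avoids.
    units = np.array(['uV'] * 3)
    # units[0].decode()          # AttributeError on Python 3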
From e159957437929518643a7339e668012b514307fc Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 15 Nov 2018 20:48:58 +0100 Subject: [PATCH 25/41] Doc for axonio read_protocol --- neo/io/axonio.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/neo/io/axonio.py b/neo/io/axonio.py index f2b38aa4d..b375507e3 100644 --- a/neo/io/axonio.py +++ b/neo/io/axonio.py @@ -15,6 +15,28 @@ class AxonIO(AxonRawIO, BaseFromRaw): - abf = Axon binary file - atf is a text file based format from axon that could be read by AsciiIO (but this file is less efficient.) + + Here an important note from erikli@github for user who want to get the : + With Axon ABF2 files, the information that you need to recapitulate the original stimulus waveform (both digital and analog) is contained in multiple places. + + - `AxonIO._axon_info['protocol']` -- things like number of samples in episode + - `AxonIO.axon_info['section']['ADCSection']` | `AxonIO.axon_info['section']['DACSection']` -- things about the number of channels and channel properties + - `AxonIO._axon_info['protocol']['nActiveDACChannel']` -- bitmask specifying which DACs are actually active + - `AxonIO._axon_info['protocol']['nDigitalEnable']` -- bitmask specifying which set of Epoch timings should be used to specify the duration of digital outputs + - `AxonIO._axon_info['dictEpochInfoPerDAC']` -- dict of dict. First index is DAC channel and second index is Epoch number (i.e. information about Epoch A in Channel 2 would be in `AxonIO._axon_info['dictEpochInfoPerDAC'][2][0]`) + - `AxonIO._axon_info['EpochInfo']` -- list of dicts containing information about each Epoch's digital out pattern. Digital out is a bitmask with least significant bit corresponding to Digital Out 0 + - `AxonIO._axon_info['listDACInfo']` -- information about DAC name, scale factor, holding level, etc + - `AxonIO._t_starts` -- start time of each sweep in a unified time basis + - `AxonIO._sampling_rate` + + The current AxonIO.read_protocol() method utilizes a subset of these. + In particular I know it doesn't consider `nDigitalEnable`, `EpochInfo`, or `nActiveDACChannel` and it doesn't account + for different types of Epochs offered by Clampex/pClamp other than discrete steps (such as ramp, pulse train, etc and + encoded by `nEpochType` in the EpochInfoPerDAC section). I'm currently parsing a superset of the properties used + by read_protocol() in my analysis scripts, but that code still doesn't parse the full information and isn't in a state + where it could be committed and I can't currently prioritize putting together all the code that would parse the full + set of data. The `AxonIO._axon_info['EpochInfo']` section doesn't currently exist. + """ _prefered_signal_group_mode = 'split-all' From 0930eab569aa5c91037c80f316dff6ade11f3a39 Mon Sep 17 00:00:00 2001 From: Andrew Davison Date: Fri, 16 Nov 2018 09:05:15 +0100 Subject: [PATCH 26/41] Fix some bugs which I see on my laptop, although apparently they are not caught by Travis or CircleCI? 
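A hypothetical sketch of walking the ABF2 protocol metadata described in the AxonIO docstring added in the previous patch; field names follow that docstring, 'example.abf' is a placeholder, and _axon_info is a private attribute, so treat this as exploratory rather than supported API:

    from neo.io import AxonIO

    io = AxonIO(filename='example.abf')      # placeholder filename
    protocol_segments = io.read_protocol()   # one Segment per episode, DAC waveforms as signals

    info = io._axon_info                     # private attribute, ABF2 files only
    print(info['protocol'].get('nDigitalEnable'))
    for dac, epochs in info.get('dictEpochInfoPerDAC', {}).items():
        for num, epoch in epochs.items():
            # nEpochType distinguishes discrete steps from ramps, pulse trains, etc.
            print(dac, num, epoch.get('nEpochType'))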
My env: conda list # packages in environment at /Users/andrew/anaconda/envs/neo: # alabaster 0.7.6 py27_0 http://repo.continuum.io/pkgs/free/osx-64/alabaster-0.7.6-py27_0.tar.bz2 allensdk 0.12.0 babel 2.1.1 py27_0 http://repo.continuum.io/pkgs/free/osx-64/babel-2.1.1-py27_0.tar.bz2 certifi 2016.2.28 py27_0 defaults chardet 3.0.4 coverage 3.7.1 py27_0 http://repo.continuum.io/pkgs/free/osx-64/coverage-3.7.1-py27_0.tar.bz2 cython 0.27 dateutil 2.1 py27_2 docutils 0.12 py27_0 http://repo.continuum.io/pkgs/free/osx-64/docutils-0.12-py27_0.tar.bz2 enum34 1.1.6 freetype 2.4.10 1 funcsigs 1.0.2 py27_0 defaults future 0.16.0 h5py 2.5.0 np110py27_4 http://repo.continuum.io/pkgs/free/osx-64/h5py-2.5.0-np110py27_4.tar.bz2 hdf5 1.8.15.1 2 http://repo.continuum.io/pkgs/free/osx-64/hdf5-1.8.15.1-2.tar.bz2 idna 2.7 igor 0.2 ipython 2.3.0 py27_0 http://repo.continuum.io/pkgs/free/osx-64/ipython-2.3.0-py27_0.tar.bz2 jinja2 2.8 py27_0 http://repo.continuum.io/pkgs/free/osx-64/jinja2-2.8-py27_0.tar.bz2 klusta 3.0.16 libpng 1.5.13 1 markupsafe 0.23 py27_0 http://repo.continuum.io/pkgs/free/osx-64/markupsafe-0.23-py27_0.tar.bz2 matplotlib 1.4.2 np19py27_0 http://repo.continuum.io/pkgs/free/osx-64/matplotlib-1.4.2-np19py27_0.tar.bz2 mkl 11.3.1 0 http://repo.continuum.io/pkgs/free/osx-64/mkl-11.3.1-0.tar.bz2 mock 2.0.0 py27_0 defaults neo (/Users/andrew/dev/analysis/neo) 0.7.0.dev0 neurom 1.4.8 nixio 1.4.2 nose 1.3.4 py27_0 http://repo.continuum.io/pkgs/free/osx-64/nose-1.3.4-py27_0.tar.bz2 nsdf 0.1 numexpr 2.3.1 np19py27_0 http://repo.continuum.io/pkgs/free/osx-64/numexpr-2.3.1-np19py27_0.tar.bz2 numpy 1.10.4 py27_0 http://repo.continuum.io/pkgs/free/osx-64/numpy-1.10.4-py27_0.tar.bz2 nwb (/Users/andrew/packages/nwb_api-python) 1.0.4b0 openssl 1.0.2l 0 defaults pandas 0.18.1 pbr 1.10.0 py27_0 defaults pip 9.0.1 py27_1 defaults pkginfo 1.4.2 pygments 2.0.2 py27_0 http://repo.continuum.io/pkgs/free/osx-64/pygments-2.0.2-py27_0.tar.bz2 pylru 1.1.0 pynrrd 0.2.1 pyparsing 2.0.1 py27_0 pytables 3.1.1 np19py27_0 http://repo.continuum.io/pkgs/free/osx-64/pytables-3.1.1-np19py27_0.tar.bz2 python 2.7.13 0 defaults python-dateutil 1.5 python.app 1.2 py27_3 http://repo.continuum.io/pkgs/free/osx-64/python.app-1.2-py27_3.tar.bz2 pytz 2015.7 py27_0 http://repo.continuum.io/pkgs/free/osx-64/pytz-2015.7-py27_0.tar.bz2 pyyaml 3.12 quantities (/Users/andrew/anaconda/envs/neo/lib/python2.7/site-packages) 0.12.1 readline 6.2 2 requests 2.19.1 requests-toolbelt 0.8.0 scipy 0.14.0 np19py27_0 http://repo.continuum.io/pkgs/free/osx-64/scipy-0.14.0-np19py27_0.tar.bz2 setuptools 36.4.0 py27_0 defaults six 1.10.0 py27_0 http://repo.continuum.io/pkgs/free/osx-64/six-1.10.0-py27_0.tar.bz2 snowballstemmer 1.2.0 py27_0 http://repo.continuum.io/pkgs/free/osx-64/snowballstemmer-1.2.0-py27_0.tar.bz2 sphinx 1.3.1 py27_0 http://repo.continuum.io/pkgs/free/osx-64/sphinx-1.3.1-py27_0.tar.bz2 sphinx-rtd-theme 0.1.7 sphinx_rtd_theme 0.1.7 py27_0 http://repo.continuum.io/pkgs/free/osx-64/sphinx_rtd_theme-0.1.7-py27_0.tar.bz2 sqlite 3.13.0 0 defaults tables 3.1.1 tk 8.5.18 0 http://repo.continuum.io/pkgs/free/osx-64/tk-8.5.18-0.tar.bz2 tqdm 4.19.5 twine 1.11.0 urllib3 1.23 wheel 0.29.0 py27_0 http://repo.continuum.io/pkgs/free/osx-64/wheel-0.29.0-py27_0.tar.bz2 zlib 1.2.11 0 defaults --- neo/core/epoch.py | 8 ++++++++ neo/test/coretest/test_analogsignal.py | 4 ++-- neo/test/coretest/test_irregularysampledsignal.py | 4 ++-- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/neo/core/epoch.py b/neo/core/epoch.py index df3354578..b4aa1eb1a 
100644 --- a/neo/core/epoch.py +++ b/neo/core/epoch.py @@ -179,6 +179,14 @@ def __getitem__(self, i): obj.labels = self.labels[i] return obj + def __getslice__(self, i, j): + ''' + Get a slice from :attr:`i` to :attr:`j`.attr[0] + + Doesn't get called in Python 3, :meth:`__getitem__` is called instead + ''' + return self.__getitem__(slice(i, j)) + @property def times(self): return pq.Quantity(self) diff --git a/neo/test/coretest/test_analogsignal.py b/neo/test/coretest/test_analogsignal.py index 56dd67870..c0f9e543e 100644 --- a/neo/test/coretest/test_analogsignal.py +++ b/neo/test/coretest/test_analogsignal.py @@ -431,7 +431,7 @@ def test__comparison_as_indexing_double_trace(self): def test__indexing_keeps_order_across_channels(self): # AnalogSignals with 10 traces each having 5 samples (eg. data[0] = [0,10,20,30,40]) data = np.array([range(10), range(10, 20), range(20, 30), range(30, 40), range(40, 50)]) - mask = np.full((5, 10), fill_value=False) + mask = np.full((5, 10), fill_value=False, dtype=bool) # selecting one entry per trace mask[[0, 1, 0, 3, 0, 2, 4, 3, 1, 4], range(10)] = True @@ -441,7 +441,7 @@ def test__indexing_keeps_order_across_channels(self): def test__indexing_keeps_order_across_time(self): # AnalogSignals with 10 traces each having 5 samples (eg. data[0] = [0,10,20,30,40]) data = np.array([range(10), range(10, 20), range(20, 30), range(30, 40), range(40, 50)]) - mask = np.full((5, 10), fill_value=False) + mask = np.full((5, 10), fill_value=False, dtype=bool) # selecting two entries per trace temporal_ids = [0, 1, 0, 3, 1, 2, 4, 2, 1, 4] + [4, 3, 2, 1, 0, 1, 2, 3, 2, 1] mask[temporal_ids, list(range(10)) + list(range(10))] = True diff --git a/neo/test/coretest/test_irregularysampledsignal.py b/neo/test/coretest/test_irregularysampledsignal.py index 89717c745..17e5d43c7 100644 --- a/neo/test/coretest/test_irregularysampledsignal.py +++ b/neo/test/coretest/test_irregularysampledsignal.py @@ -292,7 +292,7 @@ def test__comparison_as_indexing_multi_trace(self): def test__indexing_keeps_order_across_channels(self): # AnalogSignals with 10 traces each having 5 samples (eg. data[0] = [0,10,20,30,40]) data = np.array([range(10), range(10, 20), range(20, 30), range(30, 40), range(40, 50)]) - mask = np.full((5, 10), fill_value=False) + mask = np.full((5, 10), fill_value=False, dtype=bool) # selecting one entry per trace mask[[0, 1, 0, 3, 0, 2, 4, 3, 1, 4], range(10)] = True @@ -302,7 +302,7 @@ def test__indexing_keeps_order_across_channels(self): def test__indexing_keeps_order_across_time(self): # AnalogSignals with 10 traces each having 5 samples (eg. data[0] = [0,10,20,30,40]) data = np.array([range(10), range(10, 20), range(20, 30), range(30, 40), range(40, 50)]) - mask = np.full((5, 10), fill_value=False) + mask = np.full((5, 10), fill_value=False, dtype=bool) # selecting two entries per trace temporal_ids = [0, 1, 0, 3, 1, 2, 4, 2, 1, 4] + [4, 3, 2, 1, 0, 1, 2, 3, 2, 1] mask[temporal_ids, list(range(10)) + list(range(10))] = True From e8c8f7e674af963643777098e2882f480c9db2b4 Mon Sep 17 00:00:00 2001 From: Andrew Davison Date: Fri, 16 Nov 2018 10:06:57 +0100 Subject: [PATCH 27/41] Introduce additional checks on argument values when creating Epochs; explicitly support passing scalar quantity as Epoch duration. 
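In practice the new behaviour looks like this minimal sketch, mirroring the test_Epoch_creation_scalar_duration test added further down (values are illustrative):

    import numpy as np
    import quantities as pq
    from neo.core import Epoch

    # A scalar duration is broadcast to every start time instead of
    # tripping the new size check.
    epc = Epoch(times=[1.1, 1.5, 1.7] * pq.ms,
                durations=20 * pq.ns,
                labels=np.array(['a', 'b', 'c'], dtype='S'))
    print(epc.durations)    # array of three 20 ns values

    # Mismatched non-scalar durations (or labels) now raise ValueError:
    # Epoch([1.1, 1.5] * pq.ms, durations=[20, 40, 60] * pq.ns)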
--- neo/core/epoch.py | 15 +++++++++++---- neo/core/event.py | 8 ++++++-- neo/test/coretest/test_epoch.py | 15 +++++++++++++++ neo/test/coretest/test_event.py | 1 + neo/test/generate_datasets.py | 32 ++++++++++++++++++++++++++++++-- 5 files changed, 63 insertions(+), 8 deletions(-) diff --git a/neo/core/epoch.py b/neo/core/epoch.py index df3354578..7138d1c61 100644 --- a/neo/core/epoch.py +++ b/neo/core/epoch.py @@ -55,10 +55,10 @@ class Epoch(BaseNeo, pq.Quantity): dtype='|S4') *Required attributes/properties*: - :times: (quantity array 1D) The starts of the time periods. - :durations: (quantity array 1D) The length of the time period. - :labels: (numpy.array 1D dtype='S') Names or labels for the - time periods. + :times: (quantity array 1D) The start times of each time period. + :durations: (quantity array 1D or quantity scalar) The length(s) of each time period. + If a scalar, the same value is used for all time periods. + :labels: (numpy.array 1D dtype='S') Names or labels for the time periods. *Recommended attributes/properties*: :name: (str) A label for the dataset, @@ -82,8 +82,15 @@ def __new__(cls, times=None, durations=None, labels=None, units=None, times = np.array([]) * pq.s if durations is None: durations = np.array([]) * pq.s + elif durations.size != times.size: + if durations.size == 1: + durations = durations * np.ones_like(times.magnitude) + else: + raise ValueError("Durations array has different length to times") if labels is None: labels = np.array([], dtype='S') + elif len(labels) != times.size: + raise ValueError("Labels array has different length to times") if units is None: # No keyword units, so get from `times` try: diff --git a/neo/core/event.py b/neo/core/event.py index 684394516..b341c19ad 100644 --- a/neo/core/event.py +++ b/neo/core/event.py @@ -266,8 +266,7 @@ def to_epoch(self, pairwise=False, durations=None): This method has three modes of action. 1. By default, an array of `n` event times will be transformed into - an array of `n-1` epochs, where the end of one epoch is the - beginning of the next. + `n-1` epochs, where the end of one epoch is the beginning of the next. 2. If `pairwise` is True, then the event times will be taken as pairs representing the start and end time of an epoch. The number of events must be even, otherwise a ValueError is raised. @@ -278,6 +277,11 @@ def to_epoch(self, pairwise=False, durations=None): `pairwise=True` and `durations` are mutually exclusive. A ValueError will be raised if both are given. + + If `durations` is given, epoch labels are set to the corresponding + labels of the events that indicate the epoch start + If `durations` is not given, then the event labels A and B bounding + the epoch are used to set the labels of the epochs in the form 'A-B'. 
""" if pairwise: diff --git a/neo/test/coretest/test_epoch.py b/neo/test/coretest/test_epoch.py index 18e53673b..440a94e96 100644 --- a/neo/test/coretest/test_epoch.py +++ b/neo/test/coretest/test_epoch.py @@ -120,6 +120,21 @@ def test_Epoch_creation(self): self.assertEqual(epc.annotations['test2'], 'y1') self.assertTrue(epc.annotations['test3']) + def test_Epoch_creation_scalar_duration(self): + # test with scalar for durations + epc = Epoch([1.1, 1.5, 1.7] * pq.ms, + durations=20 * pq.ns, + labels=np.array(['test epoch 1', + 'test epoch 2', + 'test epoch 3'], dtype='S')) + assert_neo_object_is_compliant(epc) + + assert_arrays_equal(epc.times, [1.1, 1.5, 1.7] * pq.ms) + assert_arrays_equal(epc.durations, [20, 20, 20] * pq.ns) + self.assertEqual(epc.durations.size, 3) + assert_arrays_equal(epc.labels, + np.array(['test epoch 1', 'test epoch 2', 'test epoch 3'], dtype='S')) + def test_Epoch_repr(self): params = {'test2': 'y1', 'test3': True} epc = Epoch([1.1, 1.5, 1.7] * pq.ms, durations=[20, 40, 60] * pq.ns, diff --git a/neo/test/coretest/test_event.py b/neo/test/coretest/test_event.py index 142e450ab..a986b46ed 100644 --- a/neo/test/coretest/test_event.py +++ b/neo/test/coretest/test_event.py @@ -431,6 +431,7 @@ def test_to_epoch(self): np.array([5.0, 12.0, 23.0, 45.0])) assert_array_equal(epoch.durations.magnitude, np.array([2.0, 2.0, 2.0, 2.0])) + self.assertEqual(epoch.durations.size, 4) assert_array_equal(epoch.labels, np.array(['A', 'B', 'C', 'D'])) diff --git a/neo/test/generate_datasets.py b/neo/test/generate_datasets.py index 3794a526f..e2e4a932d 100644 --- a/neo/test/generate_datasets.py +++ b/neo/test/generate_datasets.py @@ -125,6 +125,8 @@ def generate_one_simple_segment(seg_name='segment 0', t = t + dur labels = np.array(labels, dtype='S') labels = labels[(rand(len(times)) * len(labels)).astype('i')] + assert len(times) == len(durations) + assert len(times) == len(labels) epc = Epoch(times=pq.Quantity(times, units=pq.s), durations=pq.Quantity([x[0] for x in durations], units=pq.s), @@ -319,6 +321,29 @@ def get_annotations(): return dict([(str(i), ann) for i, ann in enumerate(TEST_ANNOTATIONS)]) +def fake_epoch(seed=None, n=1): + """ + Create a fake Epoch. + + We use this separate function because the attributes of + Epoch are not independent (must all have the same size) + """ + kwargs = get_annotations() + size = np.random.randint(5, 15) + for i, attr in enumerate(Epoch._necessary_attrs + Epoch._recommended_attrs): + if seed is not None: + iseed = seed + i + else: + iseed = None + if attr[0] in ('times', 'durations', 'labels'): + kwargs[attr[0]] = get_fake_value(*attr, seed=iseed, obj=Epoch, shape=size) + else: + kwargs[attr[0]] = get_fake_value(*attr, seed=iseed, obj=Epoch, n=n) + kwargs['seed'] = seed + obj = Epoch(**kwargs) + return obj + + def fake_neo(obj_type="Block", cascade=True, seed=None, n=1): ''' Create a fake NEO object of a given type. 
Follows one-to-many @@ -336,8 +361,11 @@ def fake_neo(obj_type="Block", cascade=True, seed=None, n=1): cls = obj_type obj_type = obj_type.__name__ - kwargs = get_fake_values(obj_type, annotate=True, seed=seed, n=n) - obj = cls(**kwargs) + if cls is Epoch: + obj = fake_epoch(seed=seed, n=n) + else: + kwargs = get_fake_values(obj_type, annotate=True, seed=seed, n=n) + obj = cls(**kwargs) # if not cascading, we don't need to do any of the stuff after this if not cascade: From 9eb1ef3a16340ff7d3f35f7df8bcb8a4ae2e4bf9 Mon Sep 17 00:00:00 2001 From: Andrew Davison Date: Fri, 16 Nov 2018 10:08:56 +0100 Subject: [PATCH 28/41] Update docstring [ci skip] --- neo/core/event.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neo/core/event.py b/neo/core/event.py index b341c19ad..b27268e7c 100644 --- a/neo/core/event.py +++ b/neo/core/event.py @@ -261,7 +261,7 @@ def as_quantity(self): def to_epoch(self, pairwise=False, durations=None): """ - Transform Event to Epoch. + Returns a new Epoch object based on the times and labels in the Event object. This method has three modes of action. From 1a9d2d931c798090cbd49dc53645ae43b38c4bcd Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 16 Nov 2018 18:06:54 +0100 Subject: [PATCH 29/41] fromstring warnings --- neo/rawio/neuroexplorerrawio.py | 2 +- neo/rawio/plexonrawio.py | 2 +- neo/rawio/spike2rawio.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/neo/rawio/neuroexplorerrawio.py b/neo/rawio/neuroexplorerrawio.py index ebac824f5..3072ef654 100644 --- a/neo/rawio/neuroexplorerrawio.py +++ b/neo/rawio/neuroexplorerrawio.py @@ -267,7 +267,7 @@ def read_as_dict(fid, dtype, offset=None): if offset is not None: fid.seek(offset) dt = np.dtype(dtype) - h = np.fromstring(fid.read(dt.itemsize), dt)[0] + h = np.frombuffer(fid.read(dt.itemsize), dt)[0] info = OrderedDict() for k in dt.names: v = h[k] diff --git a/neo/rawio/plexonrawio.py b/neo/rawio/plexonrawio.py index dadc3aec9..68511e3f3 100644 --- a/neo/rawio/plexonrawio.py +++ b/neo/rawio/plexonrawio.py @@ -386,7 +386,7 @@ def read_as_dict(fid, dtype, offset=None): if offset is not None: fid.seek(offset) dt = np.dtype(dtype) - h = np.fromstring(fid.read(dt.itemsize), dt)[0] + h = np.frombuffer(fid.read(dt.itemsize), dt)[0] info = OrderedDict() for k in dt.names: v = h[k] diff --git a/neo/rawio/spike2rawio.py b/neo/rawio/spike2rawio.py index 72218086a..c846ab1eb 100644 --- a/neo/rawio/spike2rawio.py +++ b/neo/rawio/spike2rawio.py @@ -534,7 +534,7 @@ def read_as_dict(fid, dtype): Make conversion for strings. """ dt = np.dtype(dtype) - h = np.fromstring(fid.read(dt.itemsize), dt)[0] + h = np.frombuffer(fid.read(dt.itemsize), dt)[0] info = OrderedDict() for k in dt.names: v = h[k] From d296f1d18ac143d7d6ace3394931dcfc12deee28 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 16 Nov 2018 18:17:14 +0100 Subject: [PATCH 30/41] for regex replace string pattern with raw string r'....' 
pattern --- neo/io/blackrockio_v4.py | 8 +++---- neo/io/elphyio.py | 2 +- neo/io/neomatlabio.py | 2 +- neo/io/neuralynxio_v1.py | 40 +++++++++++++++++------------------ neo/rawio/brainvisionrawio.py | 2 +- neo/rawio/elanrawio.py | 14 ++++++------ neo/rawio/neuralynxrawio.py | 22 +++++++++---------- 7 files changed, 45 insertions(+), 45 deletions(-) diff --git a/neo/io/blackrockio_v4.py b/neo/io/blackrockio_v4.py index 97faea687..c2fcd69e1 100644 --- a/neo/io/blackrockio_v4.py +++ b/neo/io/blackrockio_v4.py @@ -232,22 +232,22 @@ def __init__(self, filename, nsx_override=None, nev_override=None, self._filenames = {} if nsx_override: self._filenames['nsx'] = re.sub( - os.path.extsep + 'ns[1,2,3,4,5,6]$', '', nsx_override) + os.path.extsep + r'ns[1,2,3,4,5,6]$', '', nsx_override) else: self._filenames['nsx'] = self.filename if nev_override: self._filenames['nev'] = re.sub( - os.path.extsep + 'nev$', '', nev_override) + os.path.extsep + r'nev$', '', nev_override) else: self._filenames['nev'] = self.filename if sif_override: self._filenames['sif'] = re.sub( - os.path.extsep + 'sif$', '', sif_override) + os.path.extsep + r'sif$', '', sif_override) else: self._filenames['sif'] = self.filename if ccf_override: self._filenames['ccf'] = re.sub( - os.path.extsep + 'ccf$', '', ccf_override) + os.path.extsep + r'ccf$', '', ccf_override) else: self._filenames['ccf'] = self.filename diff --git a/neo/io/elphyio.py b/neo/io/elphyio.py index 9fa8bde55..ac4ad125c 100644 --- a/neo/io/elphyio.py +++ b/neo/io/elphyio.py @@ -2985,7 +2985,7 @@ class LayoutFactory(object): def __init__(self, elphy_file): self.elphy_file = elphy_file - self.pattern = "\d{4}(\d+|\D)\D" + self.pattern = r"\d{4}(\d+|\D)\D" self.block_subclasses = dict() @property diff --git a/neo/io/neomatlabio.py b/neo/io/neomatlabio.py index f0606a21f..dc02e6415 100644 --- a/neo/io/neomatlabio.py +++ b/neo/io/neomatlabio.py @@ -385,7 +385,7 @@ def create_ob_from_struct(self, struct, classname): if attrname in dict_attributes: attrtype = dict_attributes[attrname][0] if attrtype == datetime: - m = '(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+).(\d+)' + m = r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+).(\d+)' r = re.findall(m, str(item)) if len(r) == 1: item = datetime(*[int(e) for e in r[0]]) diff --git a/neo/io/neuralynxio_v1.py b/neo/io/neuralynxio_v1.py index b7da8c4eb..f1970a818 100644 --- a/neo/io/neuralynxio_v1.py +++ b/neo/io/neuralynxio_v1.py @@ -1901,9 +1901,9 @@ def __read_text_header(self, filename, parameter_dict): 'Successfully decoded text header of file (%s).' % filename) def __get_cheetah_version_from_txt_header(self, text_header, filename): - version_regex = re.compile('((-CheetahRev )|' - '(ApplicationName Cheetah "))' - '(?P\d{1,3}\.\d{1,3}\.\d{1,3})') + version_regex = re.compile(r'((-CheetahRev )|' + r'(ApplicationName Cheetah "))' + r'(?P\d{1,3}\.\d{1,3}\.\d{1,3})') match = version_regex.search(text_header) if match: return match.groupdict()['version'] @@ -1913,27 +1913,27 @@ def __get_cheetah_version_from_txt_header(self, text_header, filename): def __get_filename_and_times_from_txt_header(self, text_header, version): if parse_version(version) <= parse_version('5.6.4'): - datetime1_regex = re.compile('## Time Opened \(m/d/y\): ' - '(?P\S+)' - ' \(h:m:s\.ms\) ' - '(?P