Skip to content

Commit

Permalink
Remove unused interleaved reading code (#234)
Browse files Browse the repository at this point in the history
  • Loading branch information
adamreeve committed Mar 21, 2021
1 parent 76d5f1e commit 3f50d12
Show file tree
Hide file tree
Showing 2 changed files with 58 additions and 37 deletions.
43 changes: 6 additions & 37 deletions nptdms/tdms_segment.py
Original file line number Diff line number Diff line change
Expand Up @@ -365,17 +365,13 @@ class InterleavedDataReader(BaseDataReader):
def read_data_chunks(self, file, data_objects, num_chunks):
    """ Read multiple data chunks at once

    Interleaved data is read in a single pass across all channels,
    which is only possible when every channel has the same number of
    values, so mismatched lengths are rejected up front.

    :param file: Open file handle positioned at the start of raw data.
    :param data_objects: Segment objects describing each channel.
    :param num_chunks: Number of raw data chunks in the segment.
    :returns: A list containing a single chunk covering all data,
        or an empty list when there are no channels.
    :raises ValueError: If channels have differing numbers of values.
    """
    # No channels means there is no data to read
    if len(data_objects) == 0:
        return []
    same_length = (len(
        set((o.number_values for o in data_objects))) == 1)
    if not same_length:
        raise ValueError("Cannot read interleaved data with different chunk sizes")
    return [self._read_interleaved_chunks(file, data_objects, num_chunks)]

def read_channel_data_chunks(self, file, data_objects, channel_path, chunk_offset, stop_chunk):
""" Read multiple data chunks for a single channel at once
Expand All @@ -389,7 +385,7 @@ def _read_data_chunk(self, file, data_objects, chunk_index):
"""
raise NotImplementedError("Reading a single chunk is not implemented for interleaved data")

def _read_interleaved_sized(self, file, data_objects, num_chunks):
def _read_interleaved_chunks(self, file, data_objects, num_chunks):
"""Read interleaved data where all channels have a sized data type and the same length
"""
total_data_width = sum(o.data_type.size for o in data_objects)
Expand All @@ -416,25 +412,6 @@ def _read_interleaved_sized(self, file, data_objects, num_chunks):

return RawDataChunk.channel_data(channel_data)

def _read_interleaved(self, file, data_objects, num_chunks):
    """Read interleaved data point by point, for channels whose data
    type does not have a fixed-size numpy representation.

    :param file: Open file handle positioned at the start of raw data.
    :param data_objects: Segment objects describing each channel.
    :param num_chunks: Number of raw data chunks to read.
    :returns: A RawDataChunk holding the data for every channel.
    """

    log.debug("Reading interleaved data point by point")
    object_data = {}
    points_added = {}
    for obj in data_objects:
        object_data[obj.path] = obj.new_segment_data()
        points_added[obj.path] = 0
    # Values are interleaved one per channel, so keep cycling through the
    # channels until every channel has consumed all values in all chunks.
    while any(points_added[o.path] < (o.number_values * num_chunks)
              for o in data_objects):
        for obj in data_objects:
            # Fix: compare against the total values across all chunks.
            # The original compared against obj.number_values only, so
            # with num_chunks > 1 no channel could ever reach the total
            # demanded by the while condition — an infinite loop.
            if points_added[obj.path] < (obj.number_values * num_chunks):
                object_data[obj.path][points_added[obj.path]] = (
                    obj.read_value(file, self.endianness))
                points_added[obj.path] += 1

    return RawDataChunk.channel_data(object_data)


class ContiguousDataReader(BaseDataReader):
""" Reads data in a TDMS segment with contiguous (non-interleaved) data
Expand Down Expand Up @@ -516,14 +493,6 @@ def read_raw_data_index(self, f, raw_data_index_header, endianness):
else:
self.data_size = self.number_values * self.data_type.size

def read_value(self, file, endianness):
    """Read a single value for this object from the given file.

    Types with a native numpy representation are read via numpy's
    fromfile; all other types fall back to the data type's own
    read method.
    """

    np_type = self.data_type.nptype
    if np_type is None:
        return self.data_type.read(file, endianness)
    return fromfile(file, dtype=np_type.newbyteorder(endianness), count=1)[0]

def read_values(self, file, number_values, endianness):
"""Read all values for this object from a contiguous segment"""

Expand Down
52 changes: 52 additions & 0 deletions nptdms/test/test_tdms_file.py
Original file line number Diff line number Diff line change
Expand Up @@ -863,3 +863,55 @@ def test_multiple_close_after_open():
tdms_data.close()
finally:
os.remove(temp_file.name)


def test_interleaved_segment_different_length():
    """ Loading an interleaved segment where channels have different
        numbers of values should fail with a ValueError
    """
    toc = ("kTocMetaData", "kTocRawData", "kTocNewObjList", "kTocInterleavedData")
    metadata = segment_objects_metadata(
        # channel1 has 3 values but channel2 only has 2
        channel_metadata("/'group'/'channel1'", 3, 3),
        channel_metadata("/'group'/'channel2'", 3, 2),
    )
    raw_data = (
        "01 00 00 00" "02 00 00 00"
        "01 00 00 00" "02 00 00 00"
        "01 00 00 00"
    )
    generated_file = GeneratedFile()
    generated_file.add_segment(toc, metadata, raw_data)

    with pytest.raises(ValueError) as exc_info:
        _ = generated_file.load()
    assert str(exc_info.value) == "Cannot read interleaved data with different chunk sizes"


def test_interleaved_segment_unsized():
    # Interleaved reading requires every channel to have a fixed-size
    # data type; including a string channel (variable-width values) in
    # an interleaved segment should fail to load.
    test_file = GeneratedFile()
    string_channel_metadata = (
        # Length of the object path
        "18 00 00 00" +
        string_hexlify("/'group'/'StringChannel'") +
        # Length of index information
        "1C 00 00 00"
        # Raw data data type
        "20 00 00 00"
        # Dimension
        "01 00 00 00"
        # Number of raw data values
        "02 00 00 00"
        "00 00 00 00"
        # Number of bytes in data
        "19 00 00 00"
        "00 00 00 00"
        # Number of properties (0)
        "00 00 00 00")
    test_file.add_segment(
        ("kTocMetaData", "kTocRawData", "kTocNewObjList", "kTocInterleavedData"),
        segment_objects_metadata(
            channel_metadata("/'group'/'channel1'", 3, 2),
            string_channel_metadata,
        ),
        "01 00 00 00" "02 00 00 00"
        "01 00 00 00" "02 00 00 00"
    )

    # NOTE(review): loading is expected to raise TypeError for the
    # unsized string type — the exact raise site is outside this file
    with pytest.raises(TypeError):
        _ = test_file.load()

0 comments on commit 3f50d12

Please sign in to comment.