diff --git a/asammdf/blocks/mdf_v4.py b/asammdf/blocks/mdf_v4.py index a84ee11b6..31b3878b0 100644 --- a/asammdf/blocks/mdf_v4.py +++ b/asammdf/blocks/mdf_v4.py @@ -536,7 +536,6 @@ def _read(self, mapped=False): continue if group.raw_can: - try: _sig = self.get("CAN_DataFrame", group=i, ignore_invalidation_bits=True) except MdfException: @@ -1543,9 +1542,6 @@ def _append_structure_composition( inval_bits, inval_cntr, ): - - print(signal, signal.samples.dtype) - si_map = self._si_map fields = [] diff --git a/asammdf/blocks/utils.py b/asammdf/blocks/utils.py index 961fb5060..f68a9e400 100644 --- a/asammdf/blocks/utils.py +++ b/asammdf/blocks/utils.py @@ -1517,33 +1517,37 @@ def pandas_query_compatible(name): def load_can_database(file, contents=None): + file = Path(file) dbc = None - if file.exists() and file.suffix.lower() in ('.dbc', '.arxml'): - import_type = file.suffix.lower().strip('.') - loads = dbc_load if import_type == 'dbc' else arxml_load - if contents is None: + if file.suffix.lower() in ('.dbc', '.arxml'): + if contents is None and file.exists(): contents = file.read_bytes() - contents = BytesIO(contents) - try: - dbc = loads( - contents, - import_type=import_type, - key="db", - ) - except UnicodeDecodeError: - encoding = detect(contents)["encoding"] - contents = contents.decode( - encoding - ) - dbc = loads( - contents, - importType=import_type, - import_type=import_type, - key="db", - encoding=encoding, - ) + + if contents: + import_type = file.suffix.lower().strip('.') + loads = dbc_load if import_type == 'dbc' else arxml_load + + contents = BytesIO(contents) + try: + dbc = loads( + contents, + import_type=import_type, + key="db", + ) + except UnicodeDecodeError: + encoding = detect(contents)["encoding"] + contents = contents.decode( + encoding + ) + dbc = loads( + contents, + importType=import_type, + import_type=import_type, + key="db", + encoding=encoding, + ) return dbc diff --git a/asammdf/mdf.py b/asammdf/mdf.py index 2a1ca9a14..da3835a56 
100644 --- a/asammdf/mdf.py +++ b/asammdf/mdf.py @@ -3687,6 +3687,9 @@ def extract_can_logging(self, dbc_files, version=None, ignore_invalid_signals=Fa if not group.CAN_logging: continue + if 'CAN_DataFrame' not in [ch.name for ch in group.channels]: + continue + parents, dtypes = self._prepare_record(group) data = self._load_data(group) @@ -3736,7 +3739,7 @@ def extract_can_logging(self, dbc_files, version=None, ignore_invalid_signals=Fa bus_msg_ids = msg_ids.samples[idx] bus_data_bytes = data_bytes[idx] - unique_ids = np.unique(bus_msg_ids) + unique_ids = np.unique(bus_msg_ids).astype('