From 5913e09d42d025e81b4f269e1475d2604f014fe3 Mon Sep 17 00:00:00 2001 From: "Ankur Sinha (Ankur Sinha Gmail)" Date: Wed, 30 Jul 2025 17:55:14 +0100 Subject: [PATCH 01/13] feat(timeseries): separate saving and plotting of individual files --- pyneuroml/lems/__init__.py | 147 +++++++++++++++++----------- pyneuroml/plot/Plot.py | 2 + pyneuroml/plot/PlotSpikes.py | 51 +++++----- pyneuroml/plot/PlotTimeSeries.py | 99 ++++++++++++++++--- tests/lems/test_lems.py | 8 +- tests/plot/test_plot_time_series.py | 60 +++++++++++- 6 files changed, 267 insertions(+), 100 deletions(-) diff --git a/pyneuroml/lems/__init__.py b/pyneuroml/lems/__init__.py index af4d764c..f2e785ae 100644 --- a/pyneuroml/lems/__init__.py +++ b/pyneuroml/lems/__init__.py @@ -4,6 +4,7 @@ import random import shutil import typing +from datetime import datetime import neuroml from lxml import etree @@ -485,10 +486,15 @@ def get_pop_index(quantity): def load_sim_data_from_lems_file( lems_file_name: str, base_dir: str = ".", - get_events: bool = True, + get_events: bool = False, get_traces: bool = True, -) -> typing.Optional[typing.Union[typing.Tuple[typing.Dict, typing.Dict], typing.Dict]]: - """Load simulation outputs using the LEMS simulation file + t_run: datetime = datetime(1900, 1, 1), + remove_dat_files_after_load: bool = False, +) -> typing.Tuple[ + typing.Optional[typing.Dict[str, typing.Dict]], + typing.Optional[typing.Dict[str, typing.Dict]], +]: + """Load simulation outputs from LEMS simulation run. .. 
versionadded:: 1.2.2 @@ -496,17 +502,35 @@ def load_sim_data_from_lems_file( :type lems_file_name: str :param base_dir: directory to run in :type base_dir: str + :param t_run: time of run + :type t_run: datetime + :param get_events: toggle whether events should be loaded + :type get_events: bool + :param get_traces: toggle whether traces should be loaded + :type get_traces: bool + :param remove_dat_files_after_load: toggle if data files should be deleted after they've been loaded + :type remove_dat_files_after_load: bool + :returns: if both `get_events` and `get_traces` are selected, a tuple with - two dictionaries, one for traces, one for events, is returned. + two dictionaries of dictionaries, one for traces, one for events, is + returned: + + .. code-block:: python + + all_traces, all_events - Otherwise one dictionary for whichever was selected. + Otherwise one dictionary of dictionaries for whichever was selected is + returned, with None for the other. The events dictionary has the following format: .. code-block:: python { - '': { 'cell id': [] } + "outputfile": + { + '': { 'cell id': [] } + } } The traces dictionary has the following format: @@ -514,33 +538,37 @@ def load_sim_data_from_lems_file( .. code-block:: python { - 't': [], - 'col 1': [] - 'col 2': [] + "outputfile": + { + 't': [], + 'col 1': [] + 'col 2': [] + } } - :raises ValueError: if neither traces nor events are selected for loading - :raises ValueError: if no traces are found - :raises ValueError: if no events are found + Each list has multiple dictionaries, one each for each output file in + the LEMS file. 
+ :raises ValueError: if neither traces nor events are selected for loading + :raises OSError: if simulation output data file could not be found + :raises Exception: if the output file has not been modified since the + simulation was run (given as `t_run`) """ + if not get_events and not get_traces: + raise ValueError("One of get_events or get_traces must be True") + + all_traces: typing.Dict[str, typing.Dict] = {} + all_events: typing.Dict[str, typing.Dict] = {} + if not os.path.isfile(lems_file_name): real_lems_file = os.path.realpath(os.path.join(base_dir, lems_file_name)) else: real_lems_file = os.path.realpath(lems_file_name) - if not get_events and not get_traces: - raise ValueError("One of events or traces must be True") - logger.debug( - "Reloading data specified in LEMS file: %s (%s), base_dir: %s, cwd: %s;" - % (lems_file_name, real_lems_file, base_dir, os.getcwd()) + f"Reloading data specified in LEMS file: {lems_file_name} ({real_lems_file}), base_dir: {base_dir}, cwd: {os.getcwd()}" ) - # Could use pylems to parse all this... - traces = {} # type: dict - events = {} # type: dict - base_lems_file_path = os.path.dirname(os.path.realpath(lems_file_name)) tree = etree.parse(real_lems_file) @@ -549,6 +577,8 @@ def load_sim_data_from_lems_file( possible_prefixes = ["{http://www.neuroml.org/lems/0.7.2}"] if sim is None: + # print(tree.getroot().nsmap) + # print(tree.getroot().getchildren()) for pre in possible_prefixes: for comp in tree.getroot().findall(pre + "Component"): if comp.attrib["type"] == "Simulation": @@ -558,6 +588,8 @@ def load_sim_data_from_lems_file( if get_events: event_output_files = sim.findall(ns_prefix + "EventOutputFile") for i, of in enumerate(event_output_files): + events: typing.Dict = {} + name = of.attrib["fileName"] file_name = os.path.join(base_dir, name) if not os.path.isfile(file_name): # If not relative to the LEMS file... 
@@ -570,13 +602,11 @@ def load_sim_data_from_lems_file( # file_name = os.path.join(os.getcwd(),'NeuroML2','results',name) # ... try relative to cwd in NeuroML2/results subdir. if not os.path.isfile(file_name): # If not relative to the base dir... - raise OSError( - ("Could not find simulation output " "file %s" % file_name) - ) + raise OSError(f"Could not find simulation output file {file_name}") + format_ = of.attrib["format"] - logger.info( - "Loading saved events from %s (format: %s)" % (file_name, format_) - ) + logger.info(f"Loading saved events from {file_name} (format: {format_})") + selections = {} for col in of.findall(ns_prefix + "EventSelection"): id_ = int(col.attrib["id"]) @@ -593,21 +623,22 @@ def load_sim_data_from_lems_file( elif format_ == "ID_TIME": id_ = int(values[0]) t = float(values[1]) - if id_ in selections: - logger.debug( - "Found a event in cell %s (%s) at t = %s" - % (id_, selections[id_], t) - ) - events[selections[id_]].append(t) + logger.debug( + f"Found a event in cell {id_} ({selections[id_]}) at t = {t}" + ) + events[selections[id_]].append(t) - else: - logger.warning("ID %s not found in selections dictionary" % id_) - continue # skip this event + if remove_dat_files_after_load: + logger.warning( + f"Removing file {file_name} after having loading its data!" + ) + os.remove(file_name) + all_events[name] = events if get_traces: output_files = sim.findall(ns_prefix + "OutputFile") - for i, of in enumerate(output_files): + traces: typing.Dict = {} traces["t"] = [] name = of.attrib["fileName"] file_name = os.path.join(base_dir, name) @@ -623,11 +654,15 @@ def load_sim_data_from_lems_file( file_name = os.path.join(os.getcwd(), "NeuroML2", "results", name) # ... try relative to cwd in NeuroML2/results subdir. if not os.path.isfile(file_name): # If not relative to the LEMS file... 
- raise OSError( - ("Could not find simulation output " "file %s" % file_name) + raise OSError(f"Could not find simulation output file {file_name}") + + t_file_mod = datetime.fromtimestamp(os.path.getmtime(file_name)) + if t_file_mod < t_run: + raise Exception( + f"Expected output file {file_name}s has not been modified since " + f"{t_file_mod} but the simulation was run later at {t_run}." ) - logger.info("Loading traces from %s" % (file_name)) cols = [] cols.append("t") for col in of.findall(ns_prefix + "OutputColumn"): @@ -635,29 +670,23 @@ def load_sim_data_from_lems_file( traces[quantity] = [] cols.append(quantity) - # TODO: could be quicker using numpy etc? with open(file_name) as f: for line in f: values = line.split() for vi in range(len(values)): traces[cols[vi]].append(float(values[vi])) - if get_events is True and get_traces is True: - if len(events) == 0: - raise ValueError("No events found") - if len(traces) == 0: - raise ValueError("No traces found") - logger.debug("Returning both traces and events") - return traces, events + if remove_dat_files_after_load: + logger.warning( + f"Removing file {file_name}s after having loading its data!" 
+ ) + os.remove(file_name) + + all_traces[name] = traces + + if get_events and get_traces: + return all_traces, all_events + elif get_traces: + return all_traces, None else: - if get_events is True: - if len(events) == 0: - raise ValueError("No events found") - logger.debug("Returning events") - return events - elif get_traces is True: - if len(traces) == 0: - raise ValueError("No traces found") - logger.debug("Returning traces") - return traces - return None + return None, all_events diff --git a/pyneuroml/plot/Plot.py b/pyneuroml/plot/Plot.py index 25bac5b8..1ae603d9 100644 --- a/pyneuroml/plot/Plot.py +++ b/pyneuroml/plot/Plot.py @@ -140,6 +140,8 @@ def generate_plot( :type legend_position: str :param show_plot_already: if plot should be shown when created (default: True) + Otherwise, the plots are not shown and you must call pyplot.show() + explicitly to show them. Should not be used with `close_plot`. :type show_plot_already: boolean :param animate: if plot should be animated (default: False) :type animate: boolean diff --git a/pyneuroml/plot/PlotSpikes.py b/pyneuroml/plot/PlotSpikes.py index 4723d959..d414b870 100644 --- a/pyneuroml/plot/PlotSpikes.py +++ b/pyneuroml/plot/PlotSpikes.py @@ -480,7 +480,7 @@ def plot_spikes_from_lems_file( :type lems_file_name: str :param base_dir: Directory where the LEMS file resides. Defaults to the current directory. :type base_dir: str - :param show_plots_already: Whether to show the plots immediately after they are generated. Defaults to True. + :param show_plots_already: Whether to show the plots :type show_plots_already: bool :param save_spike_plot_to: Path to save the spike plot to. If `None`, the plot will not be saved. Defaults to `None`. 
:type save_spike_plot_to: Optional[str] @@ -493,31 +493,38 @@ def plot_spikes_from_lems_file( :return: None :rtype: None """ - event_data = pynmll.load_sim_data_from_lems_file( + all_events: List[Dict] = pynmll.load_sim_data_from_lems_file( lems_file_name, get_events=True, get_traces=False ) - spike_data = [] # type: List[Dict] - for select, times in event_data.items(): - new_dict = {"name": select} - new_dict["times"] = times - # the plot_spikes function will add an offset for each data entry, so - # we set the ids to 0 here - new_dict["ids"] = [0] * len(times) - - spike_data.append(new_dict) - - logger.debug("Spike data is:") - logger.debug(spike_data) + if len(all_events) > 1: + show_each_plot_already = False + + for event_data in all_events: + spike_data = [] # type: List[Dict] + for select, times in event_data.items(): + new_dict = {"name": select} + new_dict["times"] = times + # the plot_spikes function will add an offset for each data entry, so + # we set the ids to 0 here + new_dict["ids"] = [0] * len(times) + + spike_data.append(new_dict) + + logger.debug("Spike data is:") + logger.debug(spike_data) + + plot_spikes( + spike_data, + show_plots_already=show_each_plot_already, + save_spike_plot_to=save_spike_plot_to, + rates=rates, + rate_window=rate_window, + rate_bins=rate_bins, + ) - plot_spikes( - spike_data, - show_plots_already=show_plots_already, - save_spike_plot_to=save_spike_plot_to, - rates=rates, - rate_window=rate_window, - rate_bins=rate_bins, - ) + if show_plots_already is True and show_each_plot_already is False: + plt.show() def main(args: Optional[argparse.Namespace] = None) -> None: diff --git a/pyneuroml/plot/PlotTimeSeries.py b/pyneuroml/plot/PlotTimeSeries.py index a6886513..ab078039 100644 --- a/pyneuroml/plot/PlotTimeSeries.py +++ b/pyneuroml/plot/PlotTimeSeries.py @@ -27,6 +27,7 @@ TIME_SERIES_PLOTTER_DEFAULTS = { "offset": False, "labels": False, + "singlePlot": False, } @@ -84,6 +85,11 @@ def plot_time_series( if 
isinstance(trace_data, dict): trace_data = [trace_data] + # if scalebar needs to be drawn, we need to wait until all plots are done + show_this_plot_already = ( + False if scalebar_location is not None else show_plot_already + ) + num_traces = 0 for td in trace_data: num_traces += len(td) @@ -91,10 +97,7 @@ def plot_time_series( xs = [] ys = [] - if labels is True: - labelvals = [] - else: - labelvals = None + labelvals: typing.List[str] = [] # calculate trace width miny = float(math.inf) @@ -152,9 +155,9 @@ def plot_time_series( xvalues=xs, yvalues=ys, title=title, - labels=labelvals, + labels=labelvals if labels is True else None, show_yticklabels=show_yticklabels, - show_plot_already=False if scalebar_location is not None else True, + show_plot_already=show_this_plot_already, **kwargs_generate_plot, ) @@ -184,6 +187,7 @@ def plot_time_series_from_lems_file( base_dir: str = ".", title: str = "", labels: bool = True, + show_plot_already: bool = True, **kwargs, ) -> None: """Plot time series from a LEMS file. 
@@ -204,21 +208,42 @@ def plot_time_series_from_lems_file( :type base_dir: str :param labels: toggle whether plots should be labelled :type labels: bool + :param show_plot_already: whether the generated plots should be shown: + useful if you want to only save the plots to files without showing them + :type show_plot_already: bool :param kwargs: other arguments passed to `plot_time_series` :returns: None """ - traces = pynmll.load_sim_data_from_lems_file( + show_each_plot_already = True + all_traces = pynmll.load_sim_data_from_lems_file( lems_file_name, get_events=False, get_traces=True ) - plot_time_series(traces, labels=labels, xaxis="Time (s)", **kwargs) + if len(all_traces) > 1: + show_each_plot_already = False + + for traces in all_traces: + plot_time_series( + traces, + labels=labels, + xaxis="Time (s)", + show_plot_already=show_each_plot_already and show_plot_already, + **kwargs, + ) + + # show all plots together at end + # if show_each_plot_already is True, each plot will pop up as its plotted + if show_plot_already is True and show_each_plot_already is False: + plt.show() def plot_time_series_from_data_files( data_file_names: typing.Union[str, typing.List[str]], labels: bool = True, columns: typing.Optional[typing.List[int]] = None, + single_plot: bool = False, + show_plot_already: bool = True, **kwargs, ): """Plot time series from a data file. 
@@ -233,17 +258,39 @@ def plot_time_series_from_data_files( :type labels: bool :param columns: column indices to plot :type columns: list of ints: [1, 2, 3] + :param single_plot: whether all data should be plotted in one single plot + :type single_plot: bool :param kwargs: other key word arguments that are passed to the `plot_time_series` function """ - all_traces = [] + show_each_plot_already = True + save_each_plot = False if isinstance(data_file_names, str): data_file_names = [data_file_names] + if len(data_file_names) > 1 and single_plot is False: + show_each_plot_already = False + + # makes no sense as it'll keep being overwritten by each new plot + # so we ignore it and inform the user + if "save_figure_to" in kwargs: + logger.warning( + "Multiple files given and single_plot not set. Ignoring 'save_figure_to'" + ) + logger.warning("Files will be saved to .png") + kwargs.pop("save_figure_to") + save_each_plot = True + + if len(data_file_names) == 1: + single_plot = True + + traces = {} + filenum = 0 for f in data_file_names: print(f"Processing: {f}") - traces = {} + filenum += 1 + data_array = numpy.loadtxt(f) traces["t"] = data_array[:, 0] num_cols = numpy.shape(data_array)[1] @@ -252,10 +299,29 @@ def plot_time_series_from_data_files( if i not in columns: logger.warning(f"Skipping column {i}") continue - traces[f"{f}_{i}"] = data_array[:, i] - all_traces.append(traces) - plot_time_series(all_traces, labels=labels, **kwargs) + if single_plot: + traces[f"{f}_{filenum}_{i}"] = data_array[:, i] + else: + traces[f"{f}_{i}"] = data_array[:, i] + + if not single_plot: + plot_time_series( + traces, + labels=labels, + show_plot_already=show_each_plot_already and show_plot_already, + save_figure_to=f"{f}.png" if save_each_plot else None, + **kwargs, + ) + traces = {} + + if single_plot: + plot_time_series( + traces, labels=labels, show_plot_already=show_plot_already, **kwargs + ) + + if show_plot_already is True and show_each_plot_already is False: + plt.plot() def 
_process_time_series_plotter_args(): @@ -293,6 +359,12 @@ def _process_time_series_plotter_args(): default=TIME_SERIES_PLOTTER_DEFAULTS["offset"], help=("Offset plots"), ) + parser.add_argument( + "-singlePlot", + action="store_true", + default=TIME_SERIES_PLOTTER_DEFAULTS["singlePlot"], + help=("For data files: whether they should be plotted in a single plot"), + ) parser.add_argument( "-saveToFile", type=str, @@ -325,6 +397,7 @@ def _time_series_plotter_main(args=None): columns=a.columns, offset=a.offset, labels=a.labels, + single_plot=a.single_plot, bottom_left_spines_only=True, save_figure_to=a.save_to_file, ) diff --git a/tests/lems/test_lems.py b/tests/lems/test_lems.py index 8606ab51..4130674f 100644 --- a/tests/lems/test_lems.py +++ b/tests/lems/test_lems.py @@ -113,19 +113,23 @@ def test_load_sim_data_from_lems_file(self): f.flush() f.close() - events = pyl.load_sim_data_from_lems_file( + all_events = pyl.load_sim_data_from_lems_file( f.name, base_dir=".", get_events=True, get_traces=False ) - traces = pyl.load_sim_data_from_lems_file( + all_traces = pyl.load_sim_data_from_lems_file( f.name, base_dir=".", get_events=False, get_traces=True ) + self.assertIsNotNone(all_events) + events = all_events[event_data_file.name] self.assertIsNotNone(events) self.assertEqual(events["IzPop0[0]"][0], 0.04350000000009967) self.assertEqual(events["IzPop0[0]"][-1], 0.9433999999997897) print(events) + self.assertIsNotNone(all_traces) + traces = all_traces[trace_file.name] self.assertIsNotNone(traces) self.assertEqual(traces["t"][0], 0.0) self.assertEqual(traces["t"][-1], 0.0019) diff --git a/tests/plot/test_plot_time_series.py b/tests/plot/test_plot_time_series.py index 9308e958..2c38f066 100644 --- a/tests/plot/test_plot_time_series.py +++ b/tests/plot/test_plot_time_series.py @@ -86,12 +86,59 @@ def test_plot_time_series_from_data_file(self): show_plot_already=False, save_figure_to="time-series-test-from-file-2.png", ) - self.assertIsFile("time-series-test-from-file.png") 
+ self.assertIsFile("time-series-test-from-file-2.png") os.unlink("time-series-test-from-file.png") os.unlink("time-series-test-from-file-2.png") os.unlink(trace_file.name) + def test_plot_time_series_from_x_data_files(self): + """Test plot_time_series_from_data_file function""" + trace_file = tempfile.NamedTemporaryFile(mode="w", delete=False, dir=".") + for i in range(0, 1000): + print( + f"{i/1000}\t{numpy.random.default_rng().random()}\t{numpy.random.default_rng().random()}\t{numpy.random.default_rng().random()}", + file=trace_file, + ) + trace_file.flush() + trace_file.close() + + trace_file2 = tempfile.NamedTemporaryFile(mode="w", delete=False, dir=".") + for i in range(0, 1000): + print( + f"{i/1000}\t{numpy.random.default_rng().random()}\t{numpy.random.default_rng().random()}\t{numpy.random.default_rng().random()}", + file=trace_file2, + ) + trace_file2.flush() + trace_file2.close() + + pyplts.plot_time_series_from_data_files( + [trace_file.name, trace_file2.name], + title="", + offset=True, + show_plot_already=False, + single_plot=True, + save_figure_to="time-series-test-from-files.png", + ) + self.assertIsFile("time-series-test-from-files.png") + + os.unlink("time-series-test-from-files.png") + + pyplts.plot_time_series_from_data_files( + [trace_file.name, trace_file2.name], + title="", + offset=False, + show_plot_already=False, + save_figure_to="something", + ) + self.assertIsFile(f"{trace_file.name}.png") + self.assertIsFile(f"{trace_file2.name}.png") + os.unlink(f"{trace_file.name}.png") + os.unlink(f"{trace_file2.name}.png") + + os.unlink(trace_file.name) + os.unlink(trace_file2.name) + def test_plot_time_series_from_lems_file(self): """Test plot_time_series_from_lems_file function""" trace_file = tempfile.NamedTemporaryFile(mode="w", delete=False, dir=".") @@ -128,6 +175,11 @@ def test_plot_time_series_from_lems_file(self): + + + + + @@ -148,7 +200,7 @@ def test_plot_time_series_from_lems_file(self): lems_file.name, title="", offset=False, - 
show_plot_already=False, + show_plot_already=True, save_figure_to="time-series-test-from-lems-file.png", ) self.assertIsFile("time-series-test-from-lems-file.png") @@ -157,10 +209,10 @@ def test_plot_time_series_from_lems_file(self): lems_file.name, title="", offset=True, - show_plot_already=False, + show_plot_already=True, save_figure_to="time-series-test-from-lems-file-2.png", ) - self.assertIsFile("time-series-test-from-lems-file.png") + self.assertIsFile("time-series-test-from-lems-file-2.png") os.unlink("time-series-test-from-lems-file.png") os.unlink("time-series-test-from-lems-file-2.png") From bab77aa4b88af3fcf5faecbe1fcd0acaa000d17b Mon Sep 17 00:00:00 2001 From: "Ankur Sinha (Ankur Sinha Gmail)" Date: Thu, 31 Jul 2025 13:42:39 +0100 Subject: [PATCH 02/13] fix: update imports to prevent circular imports --- pyneuroml/lems/LEMSSimulation.py | 6 ++--- pyneuroml/plot/Plot.py | 10 +++----- tests/channelml/test_channelml.py | 4 ++-- tests/test_pynml.py | 40 +++++++++++++------------------ 4 files changed, 25 insertions(+), 35 deletions(-) diff --git a/pyneuroml/lems/LEMSSimulation.py b/pyneuroml/lems/LEMSSimulation.py index b2c5396c..82b574c4 100644 --- a/pyneuroml/lems/LEMSSimulation.py +++ b/pyneuroml/lems/LEMSSimulation.py @@ -12,8 +12,8 @@ import airspeed from neuroml import __version__ as libnml_ver +import pyneuroml.pynml as pynml2 from pyneuroml import __version__ as pynml_ver -from pyneuroml.pynml import read_lems_file, read_neuroml2_file from pyneuroml.utils.plot import get_next_hex_color from pyneuroml.utils.units import convert_to_units @@ -182,7 +182,7 @@ def include_neuroml2_file( self.include_files.append(nml2_file_name) if include_included: - cell = read_neuroml2_file(full_path) + cell = pynml2.read_neuroml2_file(full_path) for include in cell.includes: self.include_neuroml2_file( include.href, include_included=True, relative_to_dir=base_path @@ -204,7 +204,7 @@ def include_lems_file( self.include_files.append(lems_file_name) if include_included: 
- model = read_lems_file(lems_file_name) + model = pynml2.read_lems_file(lems_file_name) for inc in model.included_files: self.include_files.append(inc) diff --git a/pyneuroml/plot/Plot.py b/pyneuroml/plot/Plot.py index 1ae603d9..43facdfe 100644 --- a/pyneuroml/plot/Plot.py +++ b/pyneuroml/plot/Plot.py @@ -209,7 +209,7 @@ def generate_plot( f"values to plot ({len(xvalues)}) and markersizes ({len(markersizes)}) must have the same length" ) - logger.info("Generating plot: %s" % (title)) + logger.info("Generating plot" + (f": {title}" if title else "")) from matplotlib import pyplot as plt from matplotlib import rcParams @@ -382,15 +382,11 @@ def update(frame): ) pbar.finish(dirty=False) - logger.info("Saved animation to %s" % (save_figure_to)) + logger.info(f"Saved animation to {os.path.abspath(save_figure_to)}") else: if save_figure_to: - logger.info( - "Saving image to %s of plot: %s" - % (os.path.abspath(save_figure_to), title) - ) plt.savefig(save_figure_to, bbox_inches="tight") - logger.info("Saved image to %s of plot: %s" % (save_figure_to, title)) + logger.info(f"Saved image to {os.path.abspath(save_figure_to)}") if show_plot_already: if interactive_legend is True and legend_box is not None: diff --git a/tests/channelml/test_channelml.py b/tests/channelml/test_channelml.py index 02c2b497..ddd90aaa 100644 --- a/tests/channelml/test_channelml.py +++ b/tests/channelml/test_channelml.py @@ -11,8 +11,8 @@ import pathlib as pl import unittest +import pyneuroml.pynml as pynml2 from pyneuroml.channelml import channelml2nml -from pyneuroml.pynml import validate_neuroml2 from .. 
import BaseTestCase @@ -38,7 +38,7 @@ def test_channelml2nml(self): print(retval) with open(outfile, "w") as f: print(retval, file=f, flush=True) - self.assertTrue(validate_neuroml2(outfile)) + self.assertTrue(pynml2.validate_neuroml2(outfile)) pl.Path(outfile).unlink() diff --git a/tests/test_pynml.py b/tests/test_pynml.py index 3435f9a4..378193a3 100644 --- a/tests/test_pynml.py +++ b/tests/test_pynml.py @@ -13,15 +13,7 @@ import shutil import unittest -from pyneuroml.pynml import ( - execute_command_in_dir, - execute_command_in_dir_with_realtime_output, - extract_lems_definition_files, - list_exposures, - list_recording_paths_for_exposures, - run_jneuroml, - validate_neuroml2, -) +import pyneuroml.pynml as pynmlpynml logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) @@ -45,7 +37,7 @@ def test_lems_def_files_extraction(self): "Synapses.xml", ] - extraction_dir = extract_lems_definition_files() + extraction_dir = pynmlpynml.extract_lems_definition_files() newfilelist = os.listdir(extraction_dir) shutil.rmtree(extraction_dir[: -1 * len("NeuroML2CoreTypes/")]) assert sorted(filelist) == sorted(newfilelist) @@ -56,7 +48,7 @@ class TestHelperUtils(unittest.TestCase): def test_exposure_listing(self): """Test listing of exposures in NeuroML documents.""" - exps = list_exposures("tests/izhikevich_test_file.nml", "iz") + exps = pynmlpynml.list_exposures("tests/izhikevich_test_file.nml", "iz") ctypes = {} for key, val in exps.items(): ctypes[key.type] = val @@ -74,13 +66,13 @@ def test_exposure_listing(self): def test_exposure_listing_2(self): """Test listing of exposures in NeuroML documents.""" os.chdir("tests/") - exps = list_exposures("HH_example_net.nml") + exps = pynmlpynml.list_exposures("HH_example_net.nml") print(exps) os.chdir("../") def test_recording_path_listing(self): """Test listing of recording paths in NeuroML documents.""" - paths = list_recording_paths_for_exposures( + paths = pynmlpynml.list_recording_paths_for_exposures( 
"tests/izhikevich_test_file.nml", "", "IzhNet" ) print("\n".join(paths)) @@ -90,7 +82,7 @@ def test_recording_path_listing(self): def test_recording_path_listing_2(self): """Test listing of recording paths in NeuroML documents.""" os.chdir("tests/") - paths = list_recording_paths_for_exposures( + paths = pynmlpynml.list_recording_paths_for_exposures( "HH_example_net.nml", "hh_cell", "single_hh_cell_network" ) print("\n".join(paths)) @@ -104,7 +96,7 @@ def test_execute_command_in_dir(self): output = None retcode = None - retcode, output = execute_command_in_dir( + retcode, output = pynmlpynml.execute_command_in_dir( command, exec_in_dir, verbose=verbose, prefix=" jNeuroML >> " ) @@ -114,7 +106,7 @@ def test_execute_command_in_dir(self): command_bad = "ls non_existent_file" output = None retcode = None - retcode, output = execute_command_in_dir( + retcode, output = pynmlpynml.execute_command_in_dir( command_bad, exec_in_dir, verbose=verbose, prefix=" jNeuroML >> " ) self.assertNotEqual(retcode, 0) @@ -127,7 +119,7 @@ def test_execute_command_in_dir_with_realtime_output(self): verbose = True success = False - success = execute_command_in_dir_with_realtime_output( + success = pynmlpynml.execute_command_in_dir_with_realtime_output( command, exec_in_dir, verbose=verbose, prefix=" jNeuroML >> " ) self.assertTrue(success) @@ -135,7 +127,7 @@ def test_execute_command_in_dir_with_realtime_output(self): command_bad = "ls non_existent_file" success = True - success = execute_command_in_dir_with_realtime_output( + success = pynmlpynml.execute_command_in_dir_with_realtime_output( command_bad, exec_in_dir, verbose=verbose, prefix=" jNeuroML >> " ) self.assertFalse(success) @@ -143,23 +135,23 @@ def test_execute_command_in_dir_with_realtime_output(self): def test_run_jneuroml(self): """Test run_jneuroml""" retstat = None - retstat = run_jneuroml("-v", None, None) + retstat = pynmlpynml.run_jneuroml("-v", None, None) self.assertTrue(retstat) retstat = None - retstat = 
run_jneuroml("-randomflag", "", "") + retstat = pynmlpynml.run_jneuroml("-randomflag", "", "") self.assertFalse(retstat) def test_validate_neuroml2(self): """Test validate_neuroml2""" os.chdir("tests/") retval = None - retval = validate_neuroml2("HH_example_k_channel.nml") + retval = pynmlpynml.validate_neuroml2("HH_example_k_channel.nml") self.assertTrue(retval) retval = None retstring = None - retval, retstring = validate_neuroml2( + retval, retstring = pynmlpynml.validate_neuroml2( "HH_example_k_channel.nml", return_string=True ) self.assertTrue(retval) @@ -168,7 +160,9 @@ def test_validate_neuroml2(self): retval = None retstring = None - retval, retstring = validate_neuroml2("setup.cfg", return_string=True) + retval, retstring = pynmlpynml.validate_neuroml2( + "setup.cfg", return_string=True + ) self.assertFalse(retval) self.assertIn("1 failed", retstring) From 7f16728467fd24267d03cfadccc436cc1048f41a Mon Sep 17 00:00:00 2001 From: "Ankur Sinha (Ankur Sinha Gmail)" Date: Thu, 31 Jul 2025 16:11:10 +0100 Subject: [PATCH 03/13] feat(plot-time-series): refactor plotting from data --- pyneuroml/lems/__init__.py | 219 +----------------------- pyneuroml/plot/PlotTimeSeries.py | 170 +++++++++++-------- pyneuroml/runners.py | 274 ++++++++----------------------- pyneuroml/utils/simdata.py | 266 ++++++++++++++++++++++++++++++ 4 files changed, 443 insertions(+), 486 deletions(-) create mode 100644 pyneuroml/utils/simdata.py diff --git a/pyneuroml/lems/__init__.py b/pyneuroml/lems/__init__.py index f2e785ae..116a724a 100644 --- a/pyneuroml/lems/__init__.py +++ b/pyneuroml/lems/__init__.py @@ -4,13 +4,11 @@ import random import shutil import typing -from datetime import datetime import neuroml -from lxml import etree +import pyneuroml.pynml as pynml2 from pyneuroml.lems.LEMSSimulation import LEMSSimulation -from pyneuroml.pynml import read_neuroml2_file from pyneuroml.utils.plot import get_next_hex_color logger = logging.getLogger(__name__) @@ -145,10 +143,10 @@ def 
generate_lems_file_for_neuroml( ls = LEMSSimulation(sim_id, duration, dt, target, simulation_seed=simulation_seed) if nml_doc is None: - nml_doc = read_neuroml2_file( + nml_doc = pynml2.read_neuroml2_file( neuroml_file, include_includes=True, verbose=verbose ) - nml_doc_inc_not_included = read_neuroml2_file( + nml_doc_inc_not_included = pynml2.read_neuroml2_file( neuroml_file, include_includes=False, verbose=False ) else: @@ -233,7 +231,7 @@ def generate_lems_file_for_neuroml( ls.include_neuroml2_file(include.href, include_included=False) try: - sub_doc = read_neuroml2_file(incl_curr) + sub_doc = pynml2.read_neuroml2_file(incl_curr) sub_dir = ( os.path.dirname(incl_curr) if len(os.path.dirname(incl_curr)) > 0 @@ -481,212 +479,3 @@ def get_pop_index(quantity): pop = s[0] index = int(s[1]) return pop, index - - -def load_sim_data_from_lems_file( - lems_file_name: str, - base_dir: str = ".", - get_events: bool = False, - get_traces: bool = True, - t_run: datetime = datetime(1900, 1, 1), - remove_dat_files_after_load: bool = False, -) -> typing.Tuple[ - typing.Optional[typing.Dict[str, typing.Dict]], - typing.Optional[typing.Dict[str, typing.Dict]], -]: - """Load simulation outputs from LEMS simulation run. - - .. versionadded:: 1.2.2 - - :param lems_file_name: name of LEMS file that was used to generate the data - :type lems_file_name: str - :param base_dir: directory to run in - :type base_dir: str - :param t_run: time of run - :type t_run: datetime - :param get_events: toggle whether events should be loaded - :type get_events: bool - :param get_traces: toggle whether traces should be loaded - :type get_traces: bool - :param remove_dat_files_after_load: toggle if data files should be deleted after they've been loaded - :type remove_dat_files_after_load: bool - - :returns: if both `get_events` and `get_traces` are selected, a tuple with - two dictionaries of dictionaries, one for traces, one for events, is - returned: - - .. 
code-block:: python - - all_traces, all_events - - Otherwise one dictionary of dictionaries for whichever was selected is - returned, with None for the other. - - The events dictionary has the following format: - - .. code-block:: python - - { - "outputfile": - { - '': { 'cell id': [] } - } - } - - The traces dictionary has the following format: - - .. code-block:: python - - { - "outputfile": - { - 't': [], - 'col 1': [] - 'col 2': [] - } - } - - Each list has multiple dictionaries, one each for each output file in - the LEMS file. - - :raises ValueError: if neither traces nor events are selected for loading - :raises OSError: if simulation output data file could not be found - :raises Exception: if the output file has not been modified since the - simulation was run (given as `t_run`) - """ - if not get_events and not get_traces: - raise ValueError("One of get_events or get_traces must be True") - - all_traces: typing.Dict[str, typing.Dict] = {} - all_events: typing.Dict[str, typing.Dict] = {} - - if not os.path.isfile(lems_file_name): - real_lems_file = os.path.realpath(os.path.join(base_dir, lems_file_name)) - else: - real_lems_file = os.path.realpath(lems_file_name) - - logger.debug( - f"Reloading data specified in LEMS file: {lems_file_name} ({real_lems_file}), base_dir: {base_dir}, cwd: {os.getcwd()}" - ) - - base_lems_file_path = os.path.dirname(os.path.realpath(lems_file_name)) - tree = etree.parse(real_lems_file) - - sim = tree.getroot().find("Simulation") - ns_prefix = "" - - possible_prefixes = ["{http://www.neuroml.org/lems/0.7.2}"] - if sim is None: - # print(tree.getroot().nsmap) - # print(tree.getroot().getchildren()) - for pre in possible_prefixes: - for comp in tree.getroot().findall(pre + "Component"): - if comp.attrib["type"] == "Simulation": - ns_prefix = pre - sim = comp - - if get_events: - event_output_files = sim.findall(ns_prefix + "EventOutputFile") - for i, of in enumerate(event_output_files): - events: typing.Dict = {} - - name = 
of.attrib["fileName"] - file_name = os.path.join(base_dir, name) - if not os.path.isfile(file_name): # If not relative to the LEMS file... - file_name = os.path.join(base_lems_file_path, name) - - # if not os.path.isfile(file_name): # If not relative to the LEMS file... - # file_name = os.path.join(os.getcwd(),name) - # ... try relative to cwd. - # if not os.path.isfile(file_name): # If not relative to the LEMS file... - # file_name = os.path.join(os.getcwd(),'NeuroML2','results',name) - # ... try relative to cwd in NeuroML2/results subdir. - if not os.path.isfile(file_name): # If not relative to the base dir... - raise OSError(f"Could not find simulation output file {file_name}") - - format_ = of.attrib["format"] - logger.info(f"Loading saved events from {file_name} (format: {format_})") - - selections = {} - for col in of.findall(ns_prefix + "EventSelection"): - id_ = int(col.attrib["id"]) - select = col.attrib["select"] - events[select] = [] - selections[id_] = select - - with open(file_name) as f: - for line in f: - values = line.split() - if format_ == "TIME_ID": - t = float(values[0]) - id_ = int(values[1]) - elif format_ == "ID_TIME": - id_ = int(values[0]) - t = float(values[1]) - logger.debug( - f"Found a event in cell {id_} ({selections[id_]}) at t = {t}" - ) - events[selections[id_]].append(t) - - if remove_dat_files_after_load: - logger.warning( - f"Removing file {file_name} after having loading its data!" - ) - os.remove(file_name) - all_events[name] = events - - if get_traces: - output_files = sim.findall(ns_prefix + "OutputFile") - for i, of in enumerate(output_files): - traces: typing.Dict = {} - traces["t"] = [] - name = of.attrib["fileName"] - file_name = os.path.join(base_dir, name) - - if not os.path.isfile(file_name): # If not relative to the LEMS file... - file_name = os.path.join(base_lems_file_path, name) - - if not os.path.isfile(file_name): # If not relative to the LEMS file... - file_name = os.path.join(os.getcwd(), name) - - # ... 
try relative to cwd. - if not os.path.isfile(file_name): # If not relative to the LEMS file... - file_name = os.path.join(os.getcwd(), "NeuroML2", "results", name) - # ... try relative to cwd in NeuroML2/results subdir. - if not os.path.isfile(file_name): # If not relative to the LEMS file... - raise OSError(f"Could not find simulation output file {file_name}") - - t_file_mod = datetime.fromtimestamp(os.path.getmtime(file_name)) - if t_file_mod < t_run: - raise Exception( - f"Expected output file {file_name}s has not been modified since " - f"{t_file_mod} but the simulation was run later at {t_run}." - ) - - cols = [] - cols.append("t") - for col in of.findall(ns_prefix + "OutputColumn"): - quantity = col.attrib["quantity"] - traces[quantity] = [] - cols.append(quantity) - - with open(file_name) as f: - for line in f: - values = line.split() - for vi in range(len(values)): - traces[cols[vi]].append(float(values[vi])) - - if remove_dat_files_after_load: - logger.warning( - f"Removing file {file_name}s after having loading its data!" 
- ) - os.remove(file_name) - - all_traces[name] = traces - - if get_events and get_traces: - return all_traces, all_events - elif get_traces: - return all_traces, None - else: - return None, all_events diff --git a/pyneuroml/plot/PlotTimeSeries.py b/pyneuroml/plot/PlotTimeSeries.py index ab078039..8da9586c 100644 --- a/pyneuroml/plot/PlotTimeSeries.py +++ b/pyneuroml/plot/PlotTimeSeries.py @@ -16,12 +16,12 @@ from matplotlib import pyplot as plt from matplotlib_scalebar.scalebar import ScaleBar -import pyneuroml.lems as pynmll import pyneuroml.plot.Plot as pynmlplt +import pyneuroml.utils.simdata as simd from pyneuroml.utils.cli import build_namespace logger = logging.getLogger(__name__) -logger.setLevel(logging.WARNING) +logger.setLevel(logging.WARN) TIME_SERIES_PLOTTER_DEFAULTS = { @@ -37,7 +37,7 @@ def plot_time_series( typing.List[typing.Dict[typing.Any, typing.Any]], ], title: str = "", - offset: bool = True, + offset: bool = False, show_plot_already: bool = True, scalebar_location: typing.Optional[str] = None, scalebar_length: typing.Optional[float] = None, @@ -176,6 +176,8 @@ def plot_time_series( ) print(f"Note: length of the scalebar is {scalebar_length} units") + + assert ax ax.add_artist(scalebar_) if show_plot_already is True: @@ -187,7 +189,9 @@ def plot_time_series_from_lems_file( base_dir: str = ".", title: str = "", labels: bool = True, + single_plot: bool = False, show_plot_already: bool = True, + save_figure_to: typing.Optional[str] = None, **kwargs, ) -> None: """Plot time series from a LEMS file. 
@@ -215,35 +219,30 @@ def plot_time_series_from_lems_file( :returns: None """ - show_each_plot_already = True - all_traces = pynmll.load_sim_data_from_lems_file( - lems_file_name, get_events=False, get_traces=True + # the user wants to see the plots + all_traces, _ = simd.load_sim_data_from_lems_file( + lems_file_name, base_dir, get_events=False, get_traces=True + ) + assert all_traces + _plot_traces( + all_traces, + title, + labels, + single_plot, + show_plot_already, + save_figure_to, + **kwargs, ) - - if len(all_traces) > 1: - show_each_plot_already = False - - for traces in all_traces: - plot_time_series( - traces, - labels=labels, - xaxis="Time (s)", - show_plot_already=show_each_plot_already and show_plot_already, - **kwargs, - ) - - # show all plots together at end - # if show_each_plot_already is True, each plot will pop up as its plotted - if show_plot_already is True and show_each_plot_already is False: - plt.show() def plot_time_series_from_data_files( data_file_names: typing.Union[str, typing.List[str]], + title: str = "", labels: bool = True, columns: typing.Optional[typing.List[int]] = None, single_plot: bool = False, show_plot_already: bool = True, + save_figure_to: typing.Optional[str] = None, **kwargs, ): """Plot time series from a data file. 
@@ -260,68 +259,101 @@ def plot_time_series_from_data_files( :type columns: list of ints: [1, 2, 3] :param single_plot: whether all data should be plotted in one single plot :type single_plot: bool + :param save_figure_to: file to save figure to + note that if there are multiple figures, this is ignored and the files + are named based on the input files + :type save_figure_to: str :param kwargs: other key word arguments that are passed to the `plot_time_series` function """ - show_each_plot_already = True - save_each_plot = False - if isinstance(data_file_names, str): - data_file_names = [data_file_names] - - if len(data_file_names) > 1 and single_plot is False: - show_each_plot_already = False - - # makes no sense as it'll keep being overwritten by each new plot - # so we ignore it and inform the user - if "save_figure_to" in kwargs: - logger.warning( - "Multiple files given and single_plot not set. Ignoring 'save_figure_to'" - ) - logger.warning("Files will be saved to .png") - kwargs.pop("save_figure_to") - save_each_plot = True - - if len(data_file_names) == 1: - single_plot = True - - traces = {} - filenum = 0 - for f in data_file_names: - print(f"Processing: {f}") - filenum += 1 - - data_array = numpy.loadtxt(f) - traces["t"] = data_array[:, 0] - num_cols = numpy.shape(data_array)[1] - for i in range(1, num_cols, 1): - if columns and len(columns) > 0: - if i not in columns: - logger.warning(f"Skipping column {i}") - continue + all_traces = simd.load_traces_from_data_file(data_file_names, columns) + assert all_traces + _plot_traces( + all_traces, + title, + labels, + single_plot, + show_plot_already, + save_figure_to, + **kwargs, + ) + + +def _plot_traces( + all_traces: typing.Dict[str, typing.Dict], + title: str = "", + labels: bool = True, + single_plot: bool = False, + show_plot_already: bool = False, + save_figure_to: typing.Optional[str] = None, + **kwargs, +) -> None: + """Worker function for plotting traces. 
- if single_plot: - traces[f"{f}_{filenum}_{i}"] = data_array[:, i] + :param all_traces: dict with traces + :param labels: toggle whether plots should be labelled + :type labels: bool + :param single_plot: whether all data should be plotted in one single plot + :type single_plot: bool + :param save_figure_to: file to save figure to + note that if there are multiple figures, this is ignored and the files + are named based on the input files + :type save_figure_to: str + :param show_plot_already: whether the generated plots should be shown: + useful if you want to only save the plots to files without showing them + :type show_plot_already: bool + :returns: None + + """ + show_plots = show_plot_already + if len(all_traces) > 1 and single_plot is False and save_figure_to: + # we will show all plots together at the end instead of one by one, + # which blocks on each plot + show_plot_already = False + + # let the user know + logger.warning( + "Ignoring 'save_figure_to': each plot will be saved in individual files of the form .png" + ) + + if not single_plot: + for f, traces in all_traces.items(): + if save_figure_to: + if len(all_traces) > 1: + file_name = f"{f}.png" + else: + file_name = save_figure_to else: - traces[f"{f}_{i}"] = data_array[:, i] + file_name = None - if not single_plot: plot_time_series( traces, + offset=False, labels=labels, - show_plot_already=show_each_plot_already and show_plot_already, - save_figure_to=f"{f}.png" if save_each_plot else None, + show_plot_already=(show_plots and show_plot_already), + save_figure_to=file_name, **kwargs, ) - traces = {} + else: + # all traces in one single plot + flat_traces = {} + for f, traces in all_traces.items(): + flat_traces.update(traces) - if single_plot: plot_time_series( - traces, labels=labels, show_plot_already=show_plot_already, **kwargs + flat_traces, + offset=False, + labels=labels, + show_plot_already=(show_plots and show_plot_already), + save_figure_to=save_figure_to, + **kwargs, ) - if 
show_plot_already is True and show_each_plot_already is False: - plt.plot() + # if the user wants to see the plots, but we delayed to show them all + # together + if show_plots and show_plot_already: + plt.show() def _process_time_series_plotter_args(): diff --git a/pyneuroml/runners.py b/pyneuroml/runners.py index 37efa376..06fd5232 100644 --- a/pyneuroml/runners.py +++ b/pyneuroml/runners.py @@ -9,7 +9,6 @@ import inspect import logging -import math import os import pathlib import shlex @@ -24,10 +23,12 @@ from typing import Optional import ppft as pp -from lxml import etree +import pyneuroml.lems as pynmll +import pyneuroml.plot.PlotTimeSeries as pynmlt import pyneuroml.utils import pyneuroml.utils.misc +import pyneuroml.utils.simdata as pynmls from pyneuroml import DEFAULTS, __version__ from pyneuroml.errors import UNKNOWN_ERR @@ -1012,231 +1013,100 @@ def reload_saved_data( show_plot_already: bool = True, simulator: typing.Optional[str] = None, reload_events: bool = False, + reload_traces: bool = True, verbose: bool = DEFAULTS["v"], remove_dat_files_after_load: bool = False, -) -> typing.Union[dict, typing.Tuple[dict, dict]]: +) -> typing.Tuple[ + typing.Optional[typing.Dict[str, typing.Dict]], + typing.Optional[typing.Dict[str, typing.Dict]], +]: """Reload data saved from previous LEMS simulation run. + It can also plot the traces from the data. Each "OutputFile" is plotted in + a separate plot. + + .. 
seealso:: + + the :py:mod:pyneuroml.plot.PlotTimeSeries module + Module for plotting time series + + the :py:mod:pyneuroml.plot.simdata module + Module for loading simulation data + :param lems_file_name: name of LEMS file that was used to generate the data :type lems_file_name: str :param base_dir: directory to run in :type base_dir: str :param t_run: time of run :type t_run: datetime - :param plot: toggle plotting + :param plot: toggle plotting of traces :type plot: bool :param show_plot_already: toggle if plots should be shown :type show_plot_already: bool :param simulator: simulator that was used to generate data :type simulator: str - :param reload_event: toggle whether events should be loaded - :type reload_event: bool - :param verbose: toggle verbose output - :type verbose: bool + :param reload_events: toggle whether events should be loaded + :type reload_events: bool + :param reload_traces: toggle whether traces should be loaded + :type reload_traces: bool :param remove_dat_files_after_load: toggle if data files should be deleted after they've been loaded :type remove_dat_files_after_load: bool + :returns: if both `get_events` and `get_traces` are selected, a tuple with + two dictionaries of dictionaries, one for traces, one for events, is + returned: - TODO: remove unused vebose argument (needs checking to see if is being - used in other places) - """ - if not os.path.isfile(lems_file_name): - real_lems_file = os.path.realpath(os.path.join(base_dir, lems_file_name)) - else: - real_lems_file = os.path.realpath(lems_file_name) + .. code-block:: python - logger.debug( - "Reloading data specified in LEMS file: %s (%s), base_dir: %s, cwd: %s; plotting %s" - % (lems_file_name, real_lems_file, base_dir, os.getcwd(), show_plot_already) - ) + all_traces, all_events - # Could use pylems to parse all this... 
- traces = {} # type: dict - events = {} # type: dict - - if plot: - import matplotlib.pyplot as plt - - base_lems_file_path = os.path.dirname(os.path.realpath(lems_file_name)) - tree = etree.parse(real_lems_file) - - sim = tree.getroot().find("Simulation") - ns_prefix = "" - - possible_prefixes = ["{http://www.neuroml.org/lems/0.7.2}"] - if sim is None: - # print(tree.getroot().nsmap) - # print(tree.getroot().getchildren()) - for pre in possible_prefixes: - for comp in tree.getroot().findall(pre + "Component"): - if comp.attrib["type"] == "Simulation": - ns_prefix = pre - sim = comp - - if reload_events: - event_output_files = sim.findall(ns_prefix + "EventOutputFile") - for i, of in enumerate(event_output_files): - name = of.attrib["fileName"] - file_name = os.path.join(base_dir, name) - if not os.path.isfile(file_name): # If not relative to the LEMS file... - file_name = os.path.join(base_lems_file_path, name) - - # if not os.path.isfile(file_name): # If not relative to the LEMS file... - # file_name = os.path.join(os.getcwd(),name) - # ... try relative to cwd. - # if not os.path.isfile(file_name): # If not relative to the LEMS file... - # file_name = os.path.join(os.getcwd(),'NeuroML2','results',name) - # ... try relative to cwd in NeuroML2/results subdir. - if not os.path.isfile(file_name): # If not relative to the base dir... 
- raise OSError(("Could not find simulation output file %s" % file_name)) - format = of.attrib["format"] - logger.info( - "Loading saved events from %s (format: %s)" % (file_name, format) - ) - selections = {} - for col in of.findall(ns_prefix + "EventSelection"): - id = int(col.attrib["id"]) - select = col.attrib["select"] - events[select] = [] - selections[id] = select - - with open(file_name) as f: - for line in f: - values = line.split() - if format == "TIME_ID": - t = float(values[0]) - id = int(values[1]) - elif format == "ID_TIME": - id = int(values[0]) - t = float(values[1]) - logger.debug( - "Found a event in cell %s (%s) at t = %s" - % (id, selections[id], t) - ) - events[selections[id]].append(t) - - if remove_dat_files_after_load: - logger.warning( - "Removing file %s after having loading its data!" % file_name - ) - os.remove(file_name) - - output_files = sim.findall(ns_prefix + "OutputFile") - n_output_files = len(output_files) - if plot: - rows = int(max(1, math.ceil(n_output_files / float(3)))) - columns = min(3, n_output_files) - fig, ax = plt.subplots( - rows, columns, sharex=True, figsize=(8 * columns, 4 * rows) - ) - if n_output_files > 1: - ax = ax.ravel() - - for i, of in enumerate(output_files): - traces["t"] = [] - name = of.attrib["fileName"] - file_name = os.path.join(base_dir, name) - - if not os.path.isfile(file_name): # If not relative to the LEMS file... - file_name = os.path.join(base_lems_file_path, name) - - if not os.path.isfile(file_name): # If not relative to the LEMS file... - file_name = os.path.join(os.getcwd(), name) - - # ... try relative to cwd. - if not os.path.isfile(file_name): # If not relative to the LEMS file... - file_name = os.path.join(os.getcwd(), "NeuroML2", "results", name) - # ... try relative to cwd in NeuroML2/results subdir. - if not os.path.isfile(file_name): # If not relative to the LEMS file... 
- raise OSError(("Could not find simulation output file %s" % file_name)) - t_file_mod = datetime.fromtimestamp(os.path.getmtime(file_name)) - if t_file_mod < t_run: - raise Exception( - "Expected output file %s has not been modified since " - "%s but the simulation was run later at %s." - % (file_name, t_file_mod, t_run) - ) + Otherwise one dictionary of dictionaries for whichever was selected is + returned, with None for the other. - logger.debug( - "Loading saved data from %s%s" - % (file_name, " (%s)" % simulator if simulator else "") - ) + The events dictionary has the following format: - cols = [] - cols.append("t") - for col in of.findall(ns_prefix + "OutputColumn"): - quantity = col.attrib["quantity"] - traces[quantity] = [] - cols.append(quantity) - - with open(file_name) as f: - for line in f: - values = line.split() - for vi in range(len(values)): - traces[cols[vi]].append(float(values[vi])) - - if remove_dat_files_after_load: - logger.warning( - "Removing file %s after having loading its data!" % file_name - ) - os.remove(file_name) + .. 
code-block:: python - if plot: - info = "Data loaded from %s%s" % ( - file_name, - " (%s)" % simulator if simulator else "", - ) - logger.warning("Reloading: %s" % info) - plt.get_current_fig_manager().set_window_title(info) + { + "outputfile": + { + '': { 'cell id': [] } + } + } - legend = False - for key in cols: - if n_output_files > 1: - ax_ = ax[i] - else: - ax_ = ax - ax_.set_xlabel("Time (ms)") - ax_.set_ylabel("(SI units...)") - ax_.xaxis.grid(True) - ax_.yaxis.grid(True) - - if key != "t": - ax_.plot(traces["t"], traces[key], label=key) - logger.debug("Adding trace for: %s, from: %s" % (key, file_name)) - ax_.used = True - legend = True - - if legend: - if n_output_files > 1: - ax_.legend( - loc="upper right", fancybox=True, shadow=True, ncol=4 - ) # ,bbox_to_anchor=(0.5, -0.05)) - else: - ax_.legend( - loc="upper center", - bbox_to_anchor=(0.5, -0.05), - fancybox=True, - shadow=True, - ncol=4, - ) - - # print(traces.keys()) - - if plot and show_plot_already: - if n_output_files > 1: - ax_ = ax - else: - ax_ = [ax] - for axi in ax_: - if not hasattr(axi, "used") or not axi.used: - axi.axis("off") - plt.tight_layout() - plt.show() - - if reload_events: - return traces, events - else: - return traces + The traces dictionary has the following format: + + .. code-block:: python + + { + "outputfile": + { + 't': [], + 'col 1': [] + 'col 2': [] + } + } + + Each list has multiple dictionaries, one each for each output file in + the LEMS file. 
+ """ + all_traces: typing.Optional[typing.Dict] = None + all_events: typing.Optional[typing.Dict] = None + all_traces, all_events = pynmls.load_sim_data_from_lems_file( + lems_file_name=lems_file_name, + base_dir=base_dir, + get_events=reload_events, + get_traces=reload_traces, + t_run=t_run, + remove_dat_files_after_load=remove_dat_files_after_load, + ) + + if all_traces and plot: + pynmlt._plot_traces( + all_traces, show_plot_already=show_plot_already, single_plot=False + ) + + return all_traces, all_events def generate_sim_scripts_in_folder( diff --git a/pyneuroml/utils/simdata.py b/pyneuroml/utils/simdata.py new file mode 100644 index 00000000..83770ebd --- /dev/null +++ b/pyneuroml/utils/simdata.py @@ -0,0 +1,266 @@ +#!/usr/bin/env python3 +""" +Utilities for analysis of simulation generated data. + +File: pyneuroml/utils/simdata.py + +Copyright 2025 NeuroML contributors +Author: Ankur Sinha +""" + +import logging +import os +import typing +from datetime import datetime + +import numpy +from lxml import etree + +logger = logging.getLogger(__name__) +logger.setLevel(logging.DEBUG) + + +def load_traces_from_data_file( + data_file_names: typing.Union[str, typing.List[str]], + columns: typing.Optional[typing.List[int]] = None, +) -> typing.Dict[str, typing.Dict]: + """Load traces from a data file. 
+ + :param data_file_names: one or more names of data files + :type data_file_names: str or list(str) + :param columns: column indices to plot + :type columns: list of ints: [1, 2, 3] + :returns: TODO + + """ + if isinstance(data_file_names, str): + data_file_names = [data_file_names] + + all_traces = {} + for f in data_file_names: + traces = {} + logger.info(f"Processing: {f}") + + data_array = numpy.loadtxt(f) + traces["t"] = data_array[:, 0] + num_cols = numpy.shape(data_array)[1] + for i in range(1, num_cols, 1): + if columns and len(columns) > 0: + if i not in columns: + logger.warning(f"Skipping column {i}") + continue + + traces[f"{f}_{i}"] = data_array[:, i] + + all_traces[f] = traces + + return all_traces + + +def load_sim_data_from_lems_file( + lems_file_name: str, + base_dir: str = ".", + get_events: bool = False, + get_traces: bool = True, + t_run: datetime = datetime(1900, 1, 1), + remove_dat_files_after_load: bool = False, +) -> typing.Tuple[ + typing.Optional[typing.Dict[str, typing.Dict]], + typing.Optional[typing.Dict[str, typing.Dict]], +]: + """Load simulation outputs from LEMS simulation run. + + .. versionadded:: 1.2.2 + + :param lems_file_name: name of LEMS file that was used to generate the data + :type lems_file_name: str + :param base_dir: directory to run in + :type base_dir: str + :param t_run: time of run + :type t_run: datetime + :param get_events: toggle whether events should be loaded + :type get_events: bool + :param get_traces: toggle whether traces should be loaded + :type get_traces: bool + :param remove_dat_files_after_load: toggle if data files should be deleted after they've been loaded + :type remove_dat_files_after_load: bool + + :returns: if both `get_events` and `get_traces` are selected, a tuple with + two dictionaries of dictionaries, one for traces, one for events, is + returned: + + .. 
code-block:: python + + all_traces, all_events + + Otherwise one dictionary of dictionaries for whichever was selected is + returned, with None for the other. + + The events dictionary has the following format: + + .. code-block:: python + + { + "outputfile": + { + '': { 'cell id': [] } + } + } + + The traces dictionary has the following format: + + .. code-block:: python + + { + "outputfile": + { + 't': [], + 'col 1': [] + 'col 2': [] + } + } + + Each list has multiple dictionaries, one each for each output file in + the LEMS file. + + :raises ValueError: if neither traces nor events are selected for loading + :raises OSError: if simulation output data file could not be found + :raises Exception: if the output file has not been modified since the + simulation was run (given as `t_run`) + """ + if not get_events and not get_traces: + raise ValueError("One of get_events or get_traces must be True") + + all_traces: typing.Dict[str, typing.Dict] = {} + all_events: typing.Dict[str, typing.Dict] = {} + + if not os.path.isfile(lems_file_name): + real_lems_file = os.path.realpath(os.path.join(base_dir, lems_file_name)) + else: + real_lems_file = os.path.realpath(lems_file_name) + + logger.debug( + f"Reloading data specified in LEMS file: {lems_file_name} ({real_lems_file}), base_dir: {base_dir}, cwd: {os.getcwd()}" + ) + + base_lems_file_path = os.path.dirname(os.path.realpath(lems_file_name)) + tree = etree.parse(real_lems_file) + + sim = tree.getroot().find("Simulation") + ns_prefix = "" + + possible_prefixes = ["{http://www.neuroml.org/lems/0.7.2}"] + if sim is None: + # print(tree.getroot().nsmap) + # print(tree.getroot().getchildren()) + for pre in possible_prefixes: + for comp in tree.getroot().findall(pre + "Component"): + if comp.attrib["type"] == "Simulation": + ns_prefix = pre + sim = comp + + if get_events: + event_output_files = sim.findall(ns_prefix + "EventOutputFile") + for i, of in enumerate(event_output_files): + events: typing.Dict = {} + + name = 
of.attrib["fileName"] + file_name = os.path.join(base_dir, name) + if not os.path.isfile(file_name): # If not relative to the LEMS file... + file_name = os.path.join(base_lems_file_path, name) + + # if not os.path.isfile(file_name): # If not relative to the LEMS file... + # file_name = os.path.join(os.getcwd(),name) + # ... try relative to cwd. + # if not os.path.isfile(file_name): # If not relative to the LEMS file... + # file_name = os.path.join(os.getcwd(),'NeuroML2','results',name) + # ... try relative to cwd in NeuroML2/results subdir. + if not os.path.isfile(file_name): # If not relative to the base dir... + raise OSError(f"Could not find simulation output file {file_name}") + + format_ = of.attrib["format"] + logger.info(f"Loading saved events from {file_name} (format: {format_})") + + selections = {} + for col in of.findall(ns_prefix + "EventSelection"): + id_ = int(col.attrib["id"]) + select = col.attrib["select"] + events[select] = [] + selections[id_] = select + + with open(file_name) as f: + for line in f: + values = line.split() + if format_ == "TIME_ID": + t = float(values[0]) + id_ = int(values[1]) + elif format_ == "ID_TIME": + id_ = int(values[0]) + t = float(values[1]) + logger.debug( + f"Found a event in cell {id_} ({selections[id_]}) at t = {t}" + ) + events[selections[id_]].append(t) + + if remove_dat_files_after_load: + logger.warning( + f"Removing file {file_name} after having loading its data!" + ) + os.remove(file_name) + all_events[name] = events + + if get_traces: + output_files = sim.findall(ns_prefix + "OutputFile") + for i, of in enumerate(output_files): + traces: typing.Dict = {} + traces["t"] = [] + name = of.attrib["fileName"] + file_name = os.path.join(base_dir, name) + + if not os.path.isfile(file_name): # If not relative to the LEMS file... + file_name = os.path.join(base_lems_file_path, name) + + if not os.path.isfile(file_name): # If not relative to the LEMS file... + file_name = os.path.join(os.getcwd(), name) + + # ... 
try relative to cwd. + if not os.path.isfile(file_name): # If not relative to the LEMS file... + file_name = os.path.join(os.getcwd(), "NeuroML2", "results", name) + # ... try relative to cwd in NeuroML2/results subdir. + if not os.path.isfile(file_name): # If not relative to the LEMS file... + raise OSError(f"Could not find simulation output file {file_name}") + + t_file_mod = datetime.fromtimestamp(os.path.getmtime(file_name)) + if t_file_mod < t_run: + raise Exception( + f"Expected output file {file_name}s has not been modified since " + f"{t_file_mod} but the simulation was run later at {t_run}." + ) + + cols = [] + cols.append("t") + for col in of.findall(ns_prefix + "OutputColumn"): + quantity = col.attrib["quantity"] + traces[quantity] = [] + cols.append(quantity) + + with open(file_name) as f: + for line in f: + values = line.split() + for vi in range(len(values)): + traces[cols[vi]].append(float(values[vi])) + + if remove_dat_files_after_load: + logger.warning( + f"Removing file {file_name}s after having loading its data!" + ) + os.remove(file_name) + + all_traces[name] = traces + + if get_events and get_traces: + return all_traces, all_events + elif get_traces: + return all_traces, None + else: + return None, all_events From b13c46d4573a7223e178339220dcf303a400ea36 Mon Sep 17 00:00:00 2001 From: "Ankur Sinha (Ankur Sinha Gmail)" Date: Thu, 31 Jul 2025 21:55:30 +0100 Subject: [PATCH 04/13] feat: tweak imports to prevent circular imports The `pynml` bit that imports everything is really creating issues. 
We need to move everything out of there and only let it be a top level import module --- pyneuroml/archive/__init__.py | 18 +++++++------ pyneuroml/plot/PlotSpikes.py | 4 +-- pyneuroml/plot/PlotTimeSeries.py | 25 ++++++++++--------- pyneuroml/runners.py | 1 - pyneuroml/validators.py | 10 ++++---- tests/archive/test_archive.py | 2 +- .../test_lems.py => utils/test_simdata.py} | 10 ++++---- 7 files changed, 36 insertions(+), 34 deletions(-) rename tests/{lems/test_lems.py => utils/test_simdata.py} (95%) diff --git a/pyneuroml/archive/__init__.py b/pyneuroml/archive/__init__.py index 9f9d5d9e..b522b61a 100644 --- a/pyneuroml/archive/__init__.py +++ b/pyneuroml/archive/__init__.py @@ -14,10 +14,10 @@ import typing from zipfile import ZipFile -from pyneuroml.runners import run_jneuroml -from pyneuroml.sedml import validate_sedml_files -from pyneuroml.utils import get_model_file_list -from pyneuroml.utils.cli import build_namespace +import pyneuroml.runners as pynmlr +import pyneuroml.sedml as pynmls +import pyneuroml.utils as pynmlu +import pyneuroml.utils.cli as pynmluc logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) @@ -85,7 +85,7 @@ def main(args=None): def cli(a: typing.Optional[typing.Any] = None, **kwargs: str): """Main cli caller method""" - a = build_namespace(DEFAULTS, a, **kwargs) + a = pynmluc.build_namespace(DEFAULTS, a, **kwargs) rootfile = a.rootfile zipfile_extension = ".neux.zip" @@ -96,13 +96,13 @@ def cli(a: typing.Optional[typing.Any] = None, **kwargs: str): a.rootfile.startswith("LEMS") and a.rootfile.endswith(".xml") ) and a.sedml is True: logger.debug("Generating SED-ML file from LEMS file") - run_jneuroml("", a.rootfile, "-sedml") + pynmlr.run_jneuroml("", a.rootfile, "-sedml") rootfile = a.rootfile.replace(".xml", ".sedml") zipfile_extension = ".omex.zip" # validate the generated file - validate_sedml_files([rootfile]) + pynmls.validate_sedml_files([rootfile]) # if explicitly given, use that if a.zipfile_extension is not None: 
@@ -175,7 +175,9 @@ def create_combine_archive( lems_def_dir = None if len(filelist) == 0: - lems_def_dir = get_model_file_list(rootfile, filelist, rootdir, lems_def_dir) + lems_def_dir = pynmlu.get_model_file_list( + rootfile, filelist, rootdir, lems_def_dir + ) create_combine_archive_manifest(rootfile, filelist + extra_files, rootdir) filelist.append("manifest.xml") diff --git a/pyneuroml/plot/PlotSpikes.py b/pyneuroml/plot/PlotSpikes.py index d414b870..3d9ac8d9 100644 --- a/pyneuroml/plot/PlotSpikes.py +++ b/pyneuroml/plot/PlotSpikes.py @@ -18,7 +18,7 @@ import matplotlib.pyplot as plt import numpy as np -import pyneuroml.lems as pynmll +import pyneuroml.utils.simdata as pynmls from pyneuroml.plot import generate_plot from pyneuroml.utils.cli import build_namespace @@ -493,7 +493,7 @@ def plot_spikes_from_lems_file( :return: None :rtype: None """ - all_events: List[Dict] = pynmll.load_sim_data_from_lems_file( + all_events: List[Dict] = pynmls.load_sim_data_from_lems_file( lems_file_name, get_events=True, get_traces=False ) diff --git a/pyneuroml/plot/PlotTimeSeries.py b/pyneuroml/plot/PlotTimeSeries.py index 8da9586c..4b9a3c0d 100644 --- a/pyneuroml/plot/PlotTimeSeries.py +++ b/pyneuroml/plot/PlotTimeSeries.py @@ -225,12 +225,12 @@ def plot_time_series_from_lems_file( ) assert all_traces _plot_traces( - all_traces, - title, - labels, - single_plot, - show_plot_already, - save_figure_to, + all_traces=all_traces, + title=title, + labels=labels, + single_plot=single_plot, + show_plot_already=show_plot_already, + save_figure_to=save_figure_to, **kwargs, ) @@ -270,12 +270,12 @@ def plot_time_series_from_data_files( all_traces = simd.load_traces_from_data_file(data_file_names, columns) assert all_traces _plot_traces( - all_traces, - title, - labels, - single_plot, - show_plot_already, - save_figure_to, + all_traces=all_traces, + title=title, + labels=labels, + single_plot=single_plot, + show_plot_already=show_plot_already, + save_figure_to=save_figure_to, **kwargs, ) 
@@ -284,6 +284,7 @@ def _plot_traces( all_traces: typing.Dict[str, typing.Dict], title: str = "", labels: bool = True, + offset: bool = False, single_plot: bool = False, show_plot_already: bool = False, save_figure_to: typing.Optional[str] = None, diff --git a/pyneuroml/runners.py b/pyneuroml/runners.py index 06fd5232..d5173c1c 100644 --- a/pyneuroml/runners.py +++ b/pyneuroml/runners.py @@ -24,7 +24,6 @@ import ppft as pp -import pyneuroml.lems as pynmll import pyneuroml.plot.PlotTimeSeries as pynmlt import pyneuroml.utils import pyneuroml.utils.misc diff --git a/pyneuroml/validators.py b/pyneuroml/validators.py index e7325020..5d57e83e 100644 --- a/pyneuroml/validators.py +++ b/pyneuroml/validators.py @@ -11,8 +11,8 @@ import typing import warnings +import pyneuroml.runners as pynmlr from pyneuroml import DEFAULTS -from pyneuroml.runners import run_jneuroml logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) @@ -44,7 +44,7 @@ def validate_neuroml1( stacklevel=2, ) - return run_jneuroml( + return pynmlr.run_jneuroml( pre_args, nml1_file_name, post_args, @@ -77,7 +77,7 @@ def validate_neuroml2( post_args = "" if max_memory is not None: - return run_jneuroml( + return pynmlr.run_jneuroml( pre_args, nml2_file_name, post_args, @@ -88,7 +88,7 @@ def validate_neuroml2( return_string=return_string, ) else: - return run_jneuroml( + return pynmlr.run_jneuroml( pre_args, nml2_file_name, post_args, @@ -120,7 +120,7 @@ def validate_neuroml2_lems_file( post_args = "" post_args += "-norun" - return run_jneuroml( + return pynmlr.run_jneuroml( "", nml2_lems_file_name, post_args, diff --git a/tests/archive/test_archive.py b/tests/archive/test_archive.py index 0a4161d6..eabf80b6 100644 --- a/tests/archive/test_archive.py +++ b/tests/archive/test_archive.py @@ -14,9 +14,9 @@ from pyneuroml.archive import ( create_combine_archive, create_combine_archive_manifest, - get_model_file_list, ) from pyneuroml.runners import run_jneuroml +from pyneuroml.utils import 
get_model_file_list logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) diff --git a/tests/lems/test_lems.py b/tests/utils/test_simdata.py similarity index 95% rename from tests/lems/test_lems.py rename to tests/utils/test_simdata.py index 4130674f..27d27d0c 100644 --- a/tests/lems/test_lems.py +++ b/tests/utils/test_simdata.py @@ -1,8 +1,8 @@ #!/usr/bin/env python3 """ -Tests related to pyneuroml.lems module +Tests related to pyneuroml.utils.simdata module -File: tests/lems/tests_lems.py +File: tests/utils/test_simdata.py Copyright 2024 NeuroML contributors """ @@ -12,14 +12,14 @@ import tempfile import unittest -import pyneuroml.lems as pyl +import pyneuroml.utils.simdata as pyl logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) -class TestLEMSModule(unittest.TestCase): - """Test the LEMS module""" +class TestSimdataModule(unittest.TestCase): + """Test the simdata module""" def test_load_sim_data_from_lems_file(self): """Test the load_sim_data_from_lems_file function""" From 25b438dbef035248e59b7aabe7594036a5f21ab5 Mon Sep 17 00:00:00 2001 From: "Ankur Sinha (Ankur Sinha Gmail)" Date: Thu, 31 Jul 2025 21:58:36 +0100 Subject: [PATCH 05/13] refactor: remove `pynml` import --- pyneuroml/lems/LEMSSimulation.py | 6 +++--- pyneuroml/lems/__init__.py | 8 ++++---- tests/channelml/test_channelml.py | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pyneuroml/lems/LEMSSimulation.py b/pyneuroml/lems/LEMSSimulation.py index 82b574c4..50a42987 100644 --- a/pyneuroml/lems/LEMSSimulation.py +++ b/pyneuroml/lems/LEMSSimulation.py @@ -12,8 +12,8 @@ import airspeed from neuroml import __version__ as libnml_ver -import pyneuroml.pynml as pynml2 from pyneuroml import __version__ as pynml_ver +from pyneuroml.io import read_lems_file, read_neuroml2_file from pyneuroml.utils.plot import get_next_hex_color from pyneuroml.utils.units import convert_to_units @@ -182,7 +182,7 @@ def include_neuroml2_file( 
self.include_files.append(nml2_file_name) if include_included: - cell = pynml2.read_neuroml2_file(full_path) + cell = read_neuroml2_file(full_path) for include in cell.includes: self.include_neuroml2_file( include.href, include_included=True, relative_to_dir=base_path @@ -204,7 +204,7 @@ def include_lems_file( self.include_files.append(lems_file_name) if include_included: - model = pynml2.read_lems_file(lems_file_name) + model = read_lems_file(lems_file_name) for inc in model.included_files: self.include_files.append(inc) diff --git a/pyneuroml/lems/__init__.py b/pyneuroml/lems/__init__.py index 116a724a..5ce85871 100644 --- a/pyneuroml/lems/__init__.py +++ b/pyneuroml/lems/__init__.py @@ -7,7 +7,7 @@ import neuroml -import pyneuroml.pynml as pynml2 +from pyneuroml.io import read_neuroml2_file from pyneuroml.lems.LEMSSimulation import LEMSSimulation from pyneuroml.utils.plot import get_next_hex_color @@ -143,10 +143,10 @@ def generate_lems_file_for_neuroml( ls = LEMSSimulation(sim_id, duration, dt, target, simulation_seed=simulation_seed) if nml_doc is None: - nml_doc = pynml2.read_neuroml2_file( + nml_doc = read_neuroml2_file( neuroml_file, include_includes=True, verbose=verbose ) - nml_doc_inc_not_included = pynml2.read_neuroml2_file( + nml_doc_inc_not_included = read_neuroml2_file( neuroml_file, include_includes=False, verbose=False ) else: @@ -231,7 +231,7 @@ def generate_lems_file_for_neuroml( ls.include_neuroml2_file(include.href, include_included=False) try: - sub_doc = pynml2.read_neuroml2_file(incl_curr) + sub_doc = read_neuroml2_file(incl_curr) sub_dir = ( os.path.dirname(incl_curr) if len(os.path.dirname(incl_curr)) > 0 diff --git a/tests/channelml/test_channelml.py b/tests/channelml/test_channelml.py index ddd90aaa..cd642af4 100644 --- a/tests/channelml/test_channelml.py +++ b/tests/channelml/test_channelml.py @@ -11,8 +11,8 @@ import pathlib as pl import unittest -import pyneuroml.pynml as pynml2 from pyneuroml.channelml import channelml2nml +from 
pyneuroml.validators import validate_neuroml2 from .. import BaseTestCase @@ -38,7 +38,7 @@ def test_channelml2nml(self): print(retval) with open(outfile, "w") as f: print(retval, file=f, flush=True) - self.assertTrue(pynml2.validate_neuroml2(outfile)) + self.assertTrue(validate_neuroml2(outfile)) pl.Path(outfile).unlink() From 618e2aa5532f41ade1d0b6489b487fa4c25cf86a Mon Sep 17 00:00:00 2001 From: "Ankur Sinha (Ankur Sinha Gmail)" Date: Fri, 1 Aug 2025 13:42:30 +0100 Subject: [PATCH 06/13] fix: ensure `reload_saved_data` maintains its API --- pyneuroml/plot/PlotSpikes.py | 4 +- pyneuroml/plot/PlotTimeSeries.py | 4 +- pyneuroml/runners.py | 104 +++++++++++++++++++++---------- pyneuroml/utils/simdata.py | 13 ++-- 4 files changed, 79 insertions(+), 46 deletions(-) diff --git a/pyneuroml/plot/PlotSpikes.py b/pyneuroml/plot/PlotSpikes.py index 3d9ac8d9..af99487a 100644 --- a/pyneuroml/plot/PlotSpikes.py +++ b/pyneuroml/plot/PlotSpikes.py @@ -493,14 +493,14 @@ def plot_spikes_from_lems_file( :return: None :rtype: None """ - all_events: List[Dict] = pynmls.load_sim_data_from_lems_file( + all_events: Dict[str, Dict] = pynmls.load_sim_data_from_lems_file( lems_file_name, get_events=True, get_traces=False ) if len(all_events) > 1: show_each_plot_already = False - for event_data in all_events: + for filename, event_data in all_events.items(): spike_data = [] # type: List[Dict] for select, times in event_data.items(): new_dict = {"name": select} diff --git a/pyneuroml/plot/PlotTimeSeries.py b/pyneuroml/plot/PlotTimeSeries.py index 4b9a3c0d..e397b4e6 100644 --- a/pyneuroml/plot/PlotTimeSeries.py +++ b/pyneuroml/plot/PlotTimeSeries.py @@ -21,7 +21,7 @@ from pyneuroml.utils.cli import build_namespace logger = logging.getLogger(__name__) -logger.setLevel(logging.WARN) +logger.setLevel(logging.WARNING) TIME_SERIES_PLOTTER_DEFAULTS = { @@ -220,7 +220,7 @@ def plot_time_series_from_lems_file( """ # the user wants to see the plots - all_traces, _ = simd.load_sim_data_from_lems_file( 
+ all_traces = simd.load_sim_data_from_lems_file( lems_file_name, base_dir, get_events=False, get_traces=True ) assert all_traces diff --git a/pyneuroml/runners.py b/pyneuroml/runners.py index d5173c1c..4413fd5d 100644 --- a/pyneuroml/runners.py +++ b/pyneuroml/runners.py @@ -1015,9 +1015,9 @@ def reload_saved_data( reload_traces: bool = True, verbose: bool = DEFAULTS["v"], remove_dat_files_after_load: bool = False, -) -> typing.Tuple[ - typing.Optional[typing.Dict[str, typing.Dict]], - typing.Optional[typing.Dict[str, typing.Dict]], +) -> typing.Union[ + typing.Dict[str, typing.Dict], + typing.Tuple[typing.Dict[str, typing.Dict], typing.Dict[str, typing.Dict]], ]: """Reload data saved from previous LEMS simulation run. @@ -1026,10 +1026,10 @@ .. seealso:: - the :py:mod:pyneuroml.plot.PlotTimeSeries module + the :py:mod:`pyneuroml.plot.PlotTimeSeries` module Module for plotting time series - the :py:mod:pyneuroml.plot.simdata module + the :py:mod:`pyneuroml.utils.simdata` module Module for loading simulation data :param lems_file_name: name of LEMS file that was used to generate the data @@ -1052,25 +1052,20 @@ :type remove_dat_files_after_load: bool :returns: if both `get_events` and `get_traces` are selected, a tuple with - two dictionaries of dictionaries, one for traces, one for events, is - returned: + two dictionaries, one for traces, one for events, is returned: .. code-block:: python all_traces, all_events - Otherwise one dictionary of dictionaries for whichever was selected is - returned, with None for the other. + Otherwise one dictionary for whichever was selected is returned. The events dictionary has the following format: .. code-block:: python { - "outputfile": - { - '': { 'cell id': [] } - } + '': { 'cell id': [] } } The traces dictionary has the following format: @@ -1078,34 +1073,75 @@ .. 
code-block:: python

     {
-        "outputfile":
-            {
-                't': [],
-                'col 1': []
-                'col 2': []
-            }
+        't': [],
+        'col 1': []
+        'col 2': []
     }

     Each list has multiple dictionaries, one each for each output file in
     the LEMS file.
-    """
-    all_traces: typing.Optional[typing.Dict] = None
-    all_events: typing.Optional[typing.Dict] = None

-    all_traces, all_events = pynmls.load_sim_data_from_lems_file(
-        lems_file_name=lems_file_name,
-        base_dir=base_dir,
-        get_events=reload_events,
-        get_traces=reload_traces,
-        t_run=t_run,
-        remove_dat_files_after_load=remove_dat_files_after_load,
-    )
-    if all_traces and plot:
-        pynmlt._plot_traces(
-            all_traces, show_plot_already=show_plot_already, single_plot=False
+
+    .. seealso::
+
+        The :py:mod:`pyneuroml.utils.simdata` module for more utility functions
+        on loading simulation data.
+
+    """
+    if not reload_events and not reload_traces:
+        raise ValueError("At least one of reload_traces or reload_events must be True")
+
+    all_traces: typing.Dict[str, typing.Dict] = {}
+    all_events: typing.Dict[str, typing.Dict] = {}
+    if reload_traces and not reload_events:
+        all_traces = pynmls.load_sim_data_from_lems_file(
+            lems_file_name=lems_file_name,
+            base_dir=base_dir,
+            get_events=False,
+            get_traces=True,
+            t_run=t_run,
+            remove_dat_files_after_load=remove_dat_files_after_load,
         )
+    elif reload_events and not reload_traces:
+        all_events = pynmls.load_sim_data_from_lems_file(
+            lems_file_name=lems_file_name,
+            base_dir=base_dir,
+            get_events=True,
+            get_traces=False,
+            t_run=t_run,
+            remove_dat_files_after_load=remove_dat_files_after_load,
+        )
+    else:
+        all_traces, all_events = pynmls.load_sim_data_from_lems_file(
+            lems_file_name=lems_file_name,
+            base_dir=base_dir,
+            get_events=True,
+            get_traces=True,
+            t_run=t_run,
+            remove_dat_files_after_load=remove_dat_files_after_load,
+        )
+
+    flat_traces = {}
+    if all_traces:
+        for f, traces in all_traces.items():
+            flat_traces.update(traces)
+
+    if all_traces and plot:
+        pynmlt._plot_traces(
+            all_traces, 
show_plot_already=show_plot_already, single_plot=False + ) - return all_traces, all_events + flat_events = {} + if all_events: + for f, events in all_events.items(): + flat_events.update(events) + + if reload_events and reload_traces: + return flat_traces, flat_events + elif reload_traces and not reload_events: + return flat_traces + elif reload_events and not reload_traces: + return flat_events def generate_sim_scripts_in_folder( diff --git a/pyneuroml/utils/simdata.py b/pyneuroml/utils/simdata.py index 83770ebd..b087889e 100644 --- a/pyneuroml/utils/simdata.py +++ b/pyneuroml/utils/simdata.py @@ -17,7 +17,7 @@ from lxml import etree logger = logging.getLogger(__name__) -logger.setLevel(logging.DEBUG) +logger.setLevel(logging.WARNING) def load_traces_from_data_file( @@ -64,10 +64,7 @@ def load_sim_data_from_lems_file( get_traces: bool = True, t_run: datetime = datetime(1900, 1, 1), remove_dat_files_after_load: bool = False, -) -> typing.Tuple[ - typing.Optional[typing.Dict[str, typing.Dict]], - typing.Optional[typing.Dict[str, typing.Dict]], -]: +): """Load simulation outputs from LEMS simulation run. .. versionadded:: 1.2.2 @@ -94,7 +91,7 @@ def load_sim_data_from_lems_file( all_traces, all_events Otherwise one dictionary of dictionaries for whichever was selected is - returned, with None for the other. + returned. 
The events dictionary has the following format: @@ -261,6 +258,6 @@ def load_sim_data_from_lems_file( if get_events and get_traces: return all_traces, all_events elif get_traces: - return all_traces, None + return all_traces else: - return None, all_events + return all_events From 2391902ebf69e05aa0b7855fcec5a6ce3e3671f3 Mon Sep 17 00:00:00 2001 From: "Ankur Sinha (Ankur Sinha Gmail)" Date: Fri, 1 Aug 2025 14:07:38 +0100 Subject: [PATCH 07/13] feat: add deprecation warning for `pynml` function --- pyneuroml/pynml.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pyneuroml/pynml.py b/pyneuroml/pynml.py index 11c0e5d0..2ffab39f 100644 --- a/pyneuroml/pynml.py +++ b/pyneuroml/pynml.py @@ -944,6 +944,12 @@ def reload_standard_dat_file(file_name: str) -> typing.Tuple[dict, list]: :type file_name: str :returns: tuple of (data, column names) """ + warnings.warn( + "This function will be removed in the future. Please prefer pyneuroml.utils.simdata.load_traces_from_data_file.", + FutureWarning, + stacklevel=2, + ) + with open(file_name) as dat_file: data = {} # type: dict indices = [] # type: list From a14ecb0f74308d0ca3b66f72eb8f7a7aeea7ce4c Mon Sep 17 00:00:00 2001 From: "Ankur Sinha (Ankur Sinha Gmail)" Date: Fri, 1 Aug 2025 14:14:22 +0100 Subject: [PATCH 08/13] doc: add `utils.simdata` --- docs/source/pyneuroml.utils.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/source/pyneuroml.utils.rst b/docs/source/pyneuroml.utils.rst index 8647c8c0..f9faa408 100644 --- a/docs/source/pyneuroml.utils.rst +++ b/docs/source/pyneuroml.utils.rst @@ -38,6 +38,14 @@ pyneuroml.utils.cli module :undoc-members: :show-inheritance: +pyneuroml.utils.simdata module +------------------------------- + +.. 
automodule:: pyneuroml.utils.simdata + :members: + :undoc-members: + :show-inheritance: + pyneuroml.utils.units module ---------------------------- From 67c6760ca5d710b77d7dd50842b3416e20a60189 Mon Sep 17 00:00:00 2001 From: "Ankur Sinha (Ankur Sinha Gmail)" Date: Fri, 1 Aug 2025 15:16:56 +0100 Subject: [PATCH 09/13] chore(ci): prefer `uv` if available --- test-ghactions.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/test-ghactions.sh b/test-ghactions.sh index 4db3520e..cc60b8e7 100755 --- a/test-ghactions.sh +++ b/test-ghactions.sh @@ -4,7 +4,12 @@ set -e # CI already installs package and all optional dependencies, so this is redundant. # But we keep it to allow easy local testing. -pip install .[dev] +if command -v uv +then + uv pip install .[dev] +else + pip install .[dev] +fi # required to test commands that should fail # need this because other constructs don't work: From bfc2a1b864890e267f35a894b5e17f0e38002cee Mon Sep 17 00:00:00 2001 From: "Ankur Sinha (Ankur Sinha Gmail)" Date: Fri, 1 Aug 2025 15:17:30 +0100 Subject: [PATCH 10/13] chore: update mod file --- examples/LeakConductance.mod | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/LeakConductance.mod b/examples/LeakConductance.mod index fe34375b..54b2fd87 100644 --- a/examples/LeakConductance.mod +++ b/examples/LeakConductance.mod @@ -3,9 +3,9 @@ TITLE Mod file for component: Component(id=LeakConductance type=ionChannelHH) COMMENT This NEURON file has been generated by org.neuroml.export (see https://github.com/NeuroML/org.neuroml.export) - org.neuroml.export v1.10.1 - org.neuroml.model v1.10.1 - jLEMS v0.11.1 + org.neuroml.export v1.11.0 + org.neuroml.model v1.11.0 + jLEMS v0.12.0 ENDCOMMENT From 260653ed81e30c71f964a921fe13ebd74b88192d Mon Sep 17 00:00:00 2001 From: "Ankur Sinha (Ankur Sinha Gmail)" Date: Tue, 5 Aug 2025 11:21:00 +0100 Subject: [PATCH 11/13] fix(time-series): respect `offset` arg Had disabled it for testing --- 
pyneuroml/plot/PlotTimeSeries.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyneuroml/plot/PlotTimeSeries.py b/pyneuroml/plot/PlotTimeSeries.py index e397b4e6..2d158151 100644 --- a/pyneuroml/plot/PlotTimeSeries.py +++ b/pyneuroml/plot/PlotTimeSeries.py @@ -330,7 +330,7 @@ def _plot_traces( plot_time_series( traces, - offset=False, + offset=offset, labels=labels, show_plot_already=(show_plots and show_plot_already), save_figure_to=file_name, @@ -344,7 +344,7 @@ def _plot_traces( plot_time_series( flat_traces, - offset=False, + offset=offset, labels=labels, show_plot_already=(show_plots and show_plot_already), save_figure_to=save_figure_to, From 5ad898c3380053d70f75af4bcb281a09975a94f1 Mon Sep 17 00:00:00 2001 From: "Ankur Sinha (Ankur Sinha Gmail)" Date: Tue, 5 Aug 2025 11:34:07 +0100 Subject: [PATCH 12/13] fix(time-series): pass all args --- pyneuroml/plot/PlotTimeSeries.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pyneuroml/plot/PlotTimeSeries.py b/pyneuroml/plot/PlotTimeSeries.py index 2d158151..16c353b0 100644 --- a/pyneuroml/plot/PlotTimeSeries.py +++ b/pyneuroml/plot/PlotTimeSeries.py @@ -330,8 +330,9 @@ def _plot_traces( plot_time_series( traces, - offset=offset, + title=title, labels=labels, + offset=offset, show_plot_already=(show_plots and show_plot_already), save_figure_to=file_name, **kwargs, @@ -344,8 +345,9 @@ def _plot_traces( plot_time_series( flat_traces, - offset=offset, + title=title, labels=labels, + offset=offset, show_plot_already=(show_plots and show_plot_already), save_figure_to=save_figure_to, **kwargs, From 8835fe3091dec39b1359c5219f362f8ad4f1737a Mon Sep 17 00:00:00 2001 From: "Ankur Sinha (Ankur Sinha Gmail)" Date: Wed, 6 Aug 2025 18:59:23 +0100 Subject: [PATCH 13/13] fix: correct import --- tests/archive/test_archive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/archive/test_archive.py b/tests/archive/test_archive.py index 3afff42c..ac328c8c 100644 --- 
a/tests/archive/test_archive.py +++ b/tests/archive/test_archive.py @@ -14,9 +14,9 @@ from pyneuroml.archive import ( create_combine_archive, create_combine_archive_manifest, - get_model_file_list, ) from pyneuroml.runners import run_jneuroml +from pyneuroml.utils import get_model_file_list from pyneuroml.utils.misc import chdir logger = logging.getLogger(__name__)