import configparser as cp
from pathlib import Path
from typing import Dict, List, Optional


class ConfigDataStacked(cp.ConfigParser):
    """
    Configuration for stacked (multi-camera) extinction coefficient computation.

    Reads ``config_stacked.ini``, which defines:

    - Global solver settings and domain parameters (``[DEFAULT]``)
    - One ``[simulation_N]`` section per camera/LEDSA run
    - A ``[led_arrays]`` section that maps virtual LED-array labels to
      per-simulation LED-array IDs

    Example file layout::

        [DEFAULT]
        solver = linear
        lambda_reg = 1e-3
        camera_channels = 0 1 2
        num_layers = 20
        domain_bounds = 0.0 3.0
        reference_property = sum_col_val
        num_ref_images = 10
        ref_img_indices = None
        average_images = False
        num_cores = 1
        output_path = .
        time_sync_tolerance = 0.5

        [simulation_0]
        path = /data/exp01/cam0
        camera_position = 1.0 0.0 1.5

        [simulation_1]
        path = /data/exp01/cam1
        camera_position = -1.0 0.0 1.5

        [led_arrays]
        # integer_id = sim_index:led_array_id [sim_index:led_array_id ...]
        # sim_index refers to [simulation_N]; led_array_id is the integer ID from that run.
        # List all sim:id pairs that observe the same physical array to stack their rays.
        0 = 0:2 1:3
        1 = 0:4 1:5

    """

    #: Default config file name looked up in the current working directory.
    DEFAULT_FILENAME = 'config_stacked.ini'

    def __init__(self, filename: Optional[str] = None, load_config_file: bool = True):
        """
        :param filename: Path to the INI file. Defaults to ``config_stacked.ini``
            in the current working directory.
        :type filename: str or None
        :param load_config_file: Whether to load the file on construction.
        :type load_config_file: bool
        """
        # allow_no_value=True lets the template writer emit bare '#'-keys as comments.
        cp.ConfigParser.__init__(self, allow_no_value=True)
        self._filename = filename or self.DEFAULT_FILENAME
        if load_config_file:
            self.load()

    # ------------------------------------------------------------------
    # I/O
    # ------------------------------------------------------------------

    def load(self) -> None:
        """Load the INI file from disk.

        :raises SystemExit: if the file is not found.
        """
        try:
            # Context manager closes the handle on both success and parse error
            # (the original read_file(open(...)) leaked the descriptor).
            with open(self._filename) as config_file:
                self.read_file(config_file)
        except FileNotFoundError:
            print(
                f'{self._filename} not found in the working directory! '
                'Please create it according to the documentation.'
            )
            raise SystemExit(1)
        print(f'{self._filename} loaded.')

    def save(self) -> None:
        """Write the current configuration back to disk."""
        with open(self._filename, 'w') as f:
            self.write(f)
        print(f'{self._filename} saved.')

    # ------------------------------------------------------------------
    # Helper
    # ------------------------------------------------------------------

    def get_list_of_values(
        self,
        section: str,
        option: str,
        dtype=int,
        fallback=None,
    ):
        """Return a typed list for *section*/*option*, or *fallback*.

        The value ``None`` (case-insensitive) or an empty string yields
        ``None`` rather than a list.
        """
        if not self.has_option(section, option):
            return fallback
        raw = self.get(section, option, fallback=None)
        if raw is None:
            return fallback
        raw = raw.strip()
        if raw.lower() == 'none' or raw == '':
            return None
        try:
            return [dtype(item) for item in raw.split()]
        except Exception as exc:
            # Malformed entries degrade to the fallback instead of aborting the run.
            print(f'Config parse error [{section}].{option}={raw!r}: {exc} — using fallback={fallback!r}')
            return fallback

    # ------------------------------------------------------------------
    # Simulation sections
    # ------------------------------------------------------------------

    def get_simulation_sections(self) -> List[str]:
        """Return all section names that start with ``simulation_``.

        Sections are ordered numerically by their index suffix (so
        ``simulation_10`` follows ``simulation_9``); non-numeric suffixes
        sort after the numeric ones, lexicographically.
        """
        def _sort_key(name: str):
            suffix = name[len('simulation_'):]
            return (0, int(suffix), '') if suffix.isdigit() else (1, 0, suffix)

        return sorted(
            (s for s in self.sections() if s.startswith('simulation_')),
            key=_sort_key,
        )

    def get_num_simulations(self) -> int:
        """Number of simulation sections defined in the config."""
        return len(self.get_simulation_sections())

    def get_simulation_path(self, sim_idx: int) -> Path:
        """Filesystem path for simulation *sim_idx*."""
        return Path(self.get(f'simulation_{sim_idx}', 'path'))

    def get_camera_position(self, sim_idx: int) -> List[float]:
        """Camera position ``[x, y, z]`` for simulation *sim_idx*."""
        return self.get_list_of_values(f'simulation_{sim_idx}', 'camera_position', dtype=float)

    # ------------------------------------------------------------------
    # LED-array mapping
    # ------------------------------------------------------------------

    def get_led_array_ids(self) -> List[int]:
        """Return the virtual LED-array IDs defined in ``[led_arrays]`` as integers.

        ``ConfigParser.options()`` merges DEFAULT keys into every section, so
        we explicitly subtract the DEFAULT keys to get only the keys that are
        actually written inside the ``[led_arrays]`` block.  Non-integer keys
        (e.g. bare '#'-comment keys present on in-memory parsers built with
        allow_no_value) are skipped instead of raising ValueError.
        """
        default_keys = set(k.lower() for k in self.defaults())
        return sorted(
            int(k) for k in self.options('led_arrays')
            if k not in default_keys and k.lstrip('-').isdigit()
        )

    def get_led_array_mapping(self, led_array_id: int) -> Dict[int, int]:
        """
        For a virtual array *led_array_id* return ``{sim_idx: led_array_id}``.

        Config value format: ``"0:2 1:3"``
        """
        raw = self.get('led_arrays', str(led_array_id)).strip()
        mapping: Dict[int, int] = {}
        for token in raw.split():
            sim_str, arr_str = token.split(':')
            mapping[int(sim_str)] = int(arr_str)
        return mapping

    # ------------------------------------------------------------------
    # Typed property accessors for global parameters
    # ------------------------------------------------------------------

    @property
    def solver(self) -> str:
        """Inversion method: ``'linear'`` or ``'nonlinear'``."""
        return self.get('DEFAULT', 'solver', fallback='linear')

    @property
    def lambda_reg(self) -> float:
        """Tikhonov regularisation parameter (linear solver)."""
        return float(self.get('DEFAULT', 'lambda_reg', fallback='1e-3'))

    @property
    def weighting_preference(self) -> float:
        """Preference weighting for the nonlinear solver."""
        return float(self.get('DEFAULT', 'weighting_preference', fallback='-6e-3'))

    @property
    def weighting_curvature(self) -> float:
        """Curvature weighting for the nonlinear solver."""
        return float(self.get('DEFAULT', 'weighting_curvature', fallback='1e-6'))

    @property
    def num_iterations(self) -> int:
        """Maximum iterations for the nonlinear solver."""
        return int(self.get('DEFAULT', 'num_iterations', fallback='200'))

    @property
    def reference_property(self) -> str:
        """Column name used as the LED intensity measure."""
        return self.get('DEFAULT', 'reference_property', fallback='sum_col_val')

    @property
    def num_ref_images(self) -> int:
        """Number of reference images used to compute I₀."""
        return int(self.get('DEFAULT', 'num_ref_images', fallback='10'))

    @property
    def ref_img_indices(self) -> Optional[List[int]]:
        """Explicit reference image indices; ``None`` → use *num_ref_images*."""
        return self.get_list_of_values('DEFAULT', 'ref_img_indices', dtype=int, fallback=None)

    @property
    def average_images(self) -> bool:
        """Whether to use averaged image pairs."""
        return self.getboolean('DEFAULT', 'average_images', fallback=False)

    @property
    def num_cores(self) -> int:
        """Number of CPU cores for parallel processing."""
        return int(self.get('DEFAULT', 'num_cores', fallback='1'))

    @property
    def num_layers(self) -> int:
        """Number of horizontal smoke layers."""
        return int(self.get('DEFAULT', 'num_layers', fallback='20'))

    @property
    def domain_bounds(self) -> List[float]:
        """``[z_min, z_max]`` of the computational domain in metres."""
        return self.get_list_of_values('DEFAULT', 'domain_bounds', dtype=float)

    @property
    def output_path(self) -> Path:
        """Root path for all stacked-analysis output files."""
        return Path(self.get('DEFAULT', 'output_path', fallback='.'))

    @property
    def camera_channels(self) -> List[int]:
        """List of colour channels (0 = R, 1 = G, 2 = B) to process."""
        return self.get_list_of_values('DEFAULT', 'camera_channels', dtype=int, fallback=[0])

    @property
    def time_sync_tolerance(self) -> float:
        """Maximum allowed difference [s] when matching images across simulations."""
        return float(self.get('DEFAULT', 'time_sync_tolerance', fallback='0.5'))

    # ------------------------------------------------------------------
    # Factory: write a template config to disk
    # ------------------------------------------------------------------

    @classmethod
    def create_template(cls, filename: str = DEFAULT_FILENAME, num_simulations: int = 2) -> None:
        """Write an annotated template ``config_stacked.ini`` to disk.

        :param filename: Destination file path.
        :param num_simulations: Number of simulation blocks to include.
        """
        cfg = cp.ConfigParser(allow_no_value=True)

        cfg['DEFAULT'] = {}
        cfg.set('DEFAULT', '# ── Solver ────────────────────────────────────────────────────────')
        cfg.set('DEFAULT', "# 'linear' (Tikhonov-NNLS) or 'nonlinear' (TNC minimisation)")
        cfg['DEFAULT']['solver'] = 'linear'
        cfg.set('DEFAULT', '# Regularisation for linear solver')
        cfg['DEFAULT']['lambda_reg'] = '1e-3'
        cfg.set('DEFAULT', '# Nonlinear solver weights (ignored when solver = linear)')
        cfg['DEFAULT']['weighting_preference'] = '-6e-3'
        cfg['DEFAULT']['weighting_curvature'] = '1e-6'
        cfg['DEFAULT']['num_iterations'] = '200'
        cfg.set('DEFAULT', '# ── Image / reference ──────────────────────────────────────────────')
        cfg['DEFAULT']['reference_property'] = 'sum_col_val'
        cfg['DEFAULT']['num_ref_images'] = '10'
        cfg.set('DEFAULT', "# Explicit reference image indices (overrides num_ref_images); 'None' to disable")
        cfg['DEFAULT']['ref_img_indices'] = 'None'
        cfg['DEFAULT']['average_images'] = 'False'
        cfg.set('DEFAULT', '# ── Channels / parallelism ─────────────────────────────────────────')
        cfg.set('DEFAULT', '# Space-separated list: 0 = R, 1 = G, 2 = B')
        cfg['DEFAULT']['camera_channels'] = '0'
        cfg['DEFAULT']['num_cores'] = '1'
        cfg.set('DEFAULT', '# ── Spatial domain ─────────────────────────────────────────────────')
        cfg['DEFAULT']['num_layers'] = '20'
        cfg.set('DEFAULT', '# Lower and upper z-bounds [m] of the computational domain')
        cfg['DEFAULT']['domain_bounds'] = '0.0 3.0'
        cfg.set('DEFAULT', '# ── Output ──────────────────────────────────────────────────────────')
        cfg.set('DEFAULT', "# Directory for stacked-analysis results; '.' = current working directory")
        cfg['DEFAULT']['output_path'] = '.'
        cfg.set('DEFAULT', '# Maximum time difference [s] for matching images across cameras')
        cfg['DEFAULT']['time_sync_tolerance'] = '0.5'

        for i in range(num_simulations):
            section = f'simulation_{i}'
            cfg[section] = {}
            cfg.set(section, f'# Path to the LEDSA run directory for camera {i}')
            cfg[section]['path'] = f'/path/to/sim{i}'
            cfg.set(section, '# Global X Y Z position [m] of camera')
            cfg[section]['camera_position'] = '0.0 0.0 1.5'

        cfg['led_arrays'] = {}
        cfg.set('led_arrays', '# Each line maps a virtual array ID (integer) to one or more sim_index:led_array_id tokens.')
        cfg.set('led_arrays', '# The integer key is the ID used in output filenames and postprocessing.')
        cfg.set('led_arrays', '# sim_index refers to the [simulation_N] block; led_array_id is the integer ID')
        cfg.set('led_arrays', '# assigned by that simulation\'s LEDSA run (see analysis/led_array_indices_.csv).')
        cfg.set('led_arrays', '# List all sim:id pairs that observe the same physical array to stack their rays.')
        cfg.set('led_arrays', '# Arrays seen by only one camera need just one token.')
        cfg['led_arrays']['0'] = '0:0 1:0'
        cfg['led_arrays']['1'] = '0:1 1:1'

        with open(filename, 'w') as f:
            cfg.write(f)
        # Fix: report the actual destination instead of a garbled placeholder.
        print(f'Template written to {filename}')
"""
Stacked extinction coefficient computation across multiple cameras/LEDSA runs.

Usage example::

    from ledsa.analysis.ConfigDataStacked import ConfigDataStacked
    from ledsa.analysis.StackedExtinctionCoefficients import StackedExtinctionCoefficients

    cfg = ConfigDataStacked()  # reads config_stacked.ini

    for arr_id in cfg.get_led_array_ids():
        for channel in cfg.camera_channels:
            sec = StackedExtinctionCoefficients(cfg, arr_id, channel)
            sec.calc_and_set_coefficients()
            sec.save()
"""

from __future__ import annotations

import datetime
from pathlib import Path
from typing import Dict, List, Optional

import numpy as np
import pandas as pd

from ledsa.analysis.ConfigDataStacked import ConfigDataStacked
from ledsa.analysis.Experiment import Camera, Experiment, Layers
from ledsa.analysis.ExtinctionCoefficientsLinear import ExtinctionCoefficientsLinear
from ledsa.analysis.ExtinctionCoefficientsNonLinear import ExtinctionCoefficientsNonLinear
from importlib.metadata import version


class StackedExtinctionCoefficients:
    """
    Joint inversion of extinction coefficients using ray data stacked across
    multiple cameras (LEDSA simulation directories).

    For a given virtual LED-array ID and colour channel the class:

    1. Builds one :class:`~ledsa.analysis.Experiment.Experiment` per
       (simulation, ``led_array_id``) pair.
    2. Constructs a stacked system matrix by vertically concatenating the
       per-experiment distance-per-layer matrices and writes it once per
       virtual array (channel-independent) to ``<output_path>/``.
    3. Computes the common time axis across all simulations, writes a
       time-alignment report to ``<output_path>/``, and stores it for reuse.
    4. Solves the augmented Beer–Lambert system with the configured solver.
    5. Writes extinction coefficient CSVs to
       ``<output_path>/analysis/extinction_coefficients/<solver>/``.

    :ivar config: Stacked-analysis configuration.
    :vartype config: ConfigDataStacked
    :ivar led_array_id: Virtual LED-array ID (integer key in ``[led_arrays]``).
    :vartype led_array_id: int
    :ivar channel: Colour channel index.
    :vartype channel: int
    :ivar layers: Shared layer discretisation.
    :vartype layers: Layers
    :ivar led_array_mapping: ``{sim_idx: led_array_id}`` for this virtual array.
    :vartype led_array_mapping: dict[int, int]
    :ivar sub_experiments: Per-simulation :class:`Experiment` objects.
    :vartype sub_experiments: list[Experiment]
    :ivar distances_per_led_and_layer: Stacked distance matrix (n_leds_total × n_layers).
    :vartype distances_per_led_and_layer: np.ndarray
    :ivar ref_intensities: Stacked reference intensities (n_leds_total,).
    :vartype ref_intensities: np.ndarray
    :ivar coefficients_per_image_and_layer: Computed extinction coefficients.
    :vartype coefficients_per_image_and_layer: list[np.ndarray]
    :ivar common_times: Experiment times [s] shared by all simulations.
    :vartype common_times: np.ndarray or None
    """

    def __init__(self, config: ConfigDataStacked, led_array_id: int, channel: int) -> None:
        """
        :param config: Parsed ``config_stacked.ini``.
        :type config: ConfigDataStacked
        :param led_array_id: Integer key defined in the ``[led_arrays]`` section.
        :type led_array_id: int
        :param channel: Colour channel (0 = R, 1 = G, 2 = B).
        :type channel: int
        """
        self.config = config
        self.led_array_id = led_array_id
        self.channel = channel

        self.layers = Layers(config.num_layers, *config.domain_bounds)
        self.led_array_mapping: Dict[int, int] = config.get_led_array_mapping(led_array_id)

        # One Experiment per (simulation, per-run led_array_id) pair,
        # in ascending sim_idx order.  All stacked arrays below follow
        # this ordering.
        self.sub_experiments: List[Experiment] = []
        for sim_idx, arr_id in sorted(self.led_array_mapping.items()):
            cam_pos = config.get_camera_position(sim_idx)
            exp = Experiment(
                layers=self.layers,
                led_array=arr_id,
                camera=Camera(*cam_pos),
                path=config.get_simulation_path(sim_idx),
                channel=channel,
                merge_led_arrays='None',
            )
            self.sub_experiments.append(exp)

        self._sub_solvers: List = [
            self._build_sub_solver(exp) for exp in self.sub_experiments
        ]

        # Stacked state; filled lazily by _ensure_stacked_variables().
        self.distances_per_led_and_layer: np.ndarray = np.array([])
        self.ref_intensities: np.ndarray = np.array([])
        self.coefficients_per_image_and_layer: List[np.ndarray] = []
        self.common_times: Optional[np.ndarray] = None
        self._time_maps: Optional[List[Dict[int, float]]] = None  # set by _compute_time_alignment
        self.solver: str = config.solver

    # ------------------------------------------------------------------
    # Sub-solver factory
    # ------------------------------------------------------------------

    def _build_sub_solver(self, experiment: Experiment):
        """Create a single-experiment solver instance for *experiment*.

        :raises ValueError: if ``config.solver`` is neither ``'linear'``
            nor ``'nonlinear'``.
        """
        cfg = self.config
        if cfg.solver == 'linear':
            return ExtinctionCoefficientsLinear(
                experiment=experiment,
                reference_property=cfg.reference_property,
                num_ref_imgs=cfg.num_ref_images,
                ref_img_indices=cfg.ref_img_indices,
                average_images=cfg.average_images,
                lambda_reg=cfg.lambda_reg,
            )
        elif cfg.solver == 'nonlinear':
            return ExtinctionCoefficientsNonLinear(
                experiment=experiment,
                reference_property=cfg.reference_property,
                num_ref_imgs=cfg.num_ref_images,
                ref_img_indices=cfg.ref_img_indices,
                average_images=cfg.average_images,
                weighting_preference=cfg.weighting_preference,
                weighting_curvature=cfg.weighting_curvature,
                num_iterations=cfg.num_iterations,
            )
        else:
            raise ValueError(
                f'Unknown solver {cfg.solver!r}. Choose "linear" or "nonlinear".'
            )

    # ------------------------------------------------------------------
    # Main entry points
    # ------------------------------------------------------------------

    def calc_and_set_coefficients(self) -> None:
        """
        Compute extinction coefficients for all time-aligned images.

        Populates :attr:`coefficients_per_image_and_layer` and
        :attr:`common_times`.

        NOTE(review): the actual inversion is delegated to the FIRST
        sub-solver, whose distance matrix was overwritten with the stacked
        matrix in :meth:`_build_stacked_distance_matrix` — the other
        sub-solvers only contribute data, not solves.
        """
        self._ensure_stacked_variables()

        ref_prop = self.config.reference_property
        for _, aligned_intensities in self._iter_aligned_images(ref_prop):
            rel_intensities = aligned_intensities / self.ref_intensities
            # Warm-start the nonlinear solver with the results accumulated
            # so far (presumably it seeds from the last entry — confirm in
            # ExtinctionCoefficientsNonLinear).
            if self.config.solver == 'nonlinear' and self.coefficients_per_image_and_layer:
                self._sub_solvers[0].coefficients_per_image_and_layer = (
                    self.coefficients_per_image_and_layer
                )
            sigmas = self._sub_solvers[0].calc_coefficients_of_img(rel_intensities)
            self.coefficients_per_image_and_layer.append(sigmas)

    def calc_and_set_coefficients_mp(self, cores: Optional[int] = None) -> None:
        """Parallel computation using multiprocessing.

        Unlike :meth:`calc_and_set_coefficients`, no warm-starting is
        possible here because images are solved independently per worker.

        :param cores: Number of worker processes. Defaults to ``config.num_cores``.
        :type cores: int or None
        """
        from multiprocessing import Pool

        if cores is None:
            cores = self.config.num_cores

        self._ensure_stacked_variables()
        ref_prop = self.config.reference_property
        # Materialise all relative-intensity vectors up front so only plain
        # ndarrays (not the iterator) need to be pickled to the workers.
        all_rel_intensities = [
            aligned / self.ref_intensities
            for _, aligned in self._iter_aligned_images(ref_prop)
        ]

        with Pool(processes=cores) as pool:
            self.coefficients_per_image_and_layer = pool.map(
                self._sub_solvers[0].calc_coefficients_of_img, all_rel_intensities
            )

    # ------------------------------------------------------------------
    # Lazy initialisation
    # ------------------------------------------------------------------

    def _ensure_stacked_variables(self) -> None:
        """Load/compute distance matrix, time alignment, and reference intensities.

        Order matters: the distance matrix step also populates each
        sub-solver's member variables (including ref_intensities), which the
        last step concatenates.
        """
        if self.distances_per_led_and_layer.size == 0:
            self._build_stacked_distance_matrix()
        if self._time_maps is None:
            self._compute_time_alignment()
        if self.ref_intensities.size == 0:
            self._build_stacked_ref_intensities()

    def _build_stacked_distance_matrix(self) -> None:
        """
        Populate each sub-solver's distance matrix, then stack them.

        The stacked distance matrix is written once per virtual array
        (channel-independent) to::

            <output_path>/led_array_<id>_distances_per_layer.txt

        A guard prevents redundant writes when multiple channels are processed.

        :raises FileNotFoundError: if a sub-experiment's LED data could not
            be loaded (``num_leds is None``).
        """
        for sim_idx, (sub, arr_id) in enumerate(
            zip(self._sub_solvers,
                [self.led_array_mapping[i] for i in sorted(self.led_array_mapping)])
        ):
            exp = sub.experiment
            if exp.num_leds is None:
                raise FileNotFoundError(
                    f"Could not load LED data for simulation {sim_idx} "
                    f"(led_array_id={arr_id}, path={exp.path}).\n"
                    f"Expected files:\n"
                    f"  {exp.path / 'analysis' / f'led_array_indices_{arr_id:03d}.csv'}\n"
                    f"  {exp.path / 'analysis' / 'led_search_areas_with_coordinates.csv'}\n"
                    "Make sure LEDSA steps 1-3 have been completed in that directory "
                    "and that the led_array_id in [led_arrays] is correct."
                )
            sub.set_all_member_variables(save_distances=False)

        self.distances_per_led_and_layer = np.vstack(
            [sub.distances_per_led_and_layer for sub in self._sub_solvers]
        )

        dist_file = self.config.output_path / f'led_array_{self.led_array_id}_distances_per_layer.txt'
        if not dist_file.exists():
            self.config.output_path.mkdir(parents=True, exist_ok=True)
            np.savetxt(dist_file, self.distances_per_led_and_layer)

        # Sub-solver 0 performs the actual inversion; give it the full
        # stacked matrix so its system spans all cameras.
        self._sub_solvers[0].distances_per_led_and_layer = self.distances_per_led_and_layer

    def _build_stacked_ref_intensities(self) -> None:
        """Concatenate per-sub-solver reference intensities.

        Assumes each sub-solver's ``ref_intensities`` was populated by
        ``set_all_member_variables`` in :meth:`_build_stacked_distance_matrix`.
        """
        self.ref_intensities = np.concatenate(
            [sub.ref_intensities for sub in self._sub_solvers]
        )

    def _compute_time_alignment(self) -> None:
        """
        Compute the common time axis across all simulations and write a
        time-alignment report to::

            <output_path>/time_alignment_led_array_<id>.txt

        The report is written once per virtual array (channel-independent).
        Sets :attr:`_time_maps` and :attr:`common_times`.

        :raises RuntimeError: if no overlapping experiment times exist.
        """
        tol = self.config.time_sync_tolerance

        self._time_maps = [
            self._load_time_map(sub.experiment, sub.average_images)
            for sub in self._sub_solvers
        ]

        all_time_arrays = [np.array(list(tm.values())) for tm in self._time_maps]
        common_times = self._intersect_time_axes(all_time_arrays, tol)

        if len(common_times) == 0:
            raise RuntimeError(
                'No overlapping experiment times found across simulations for '
                f'led_array_id={self.led_array_id}. '
                'Check time_sync_tolerance in config_stacked.ini.'
            )
        self.common_times = common_times

        print(
            f'    [array {self.led_array_id}] '
            f'{len(common_times)} time-aligned images from '
            f'{self.config.get_num_simulations()} simulations.'
        )

        report_file = self.config.output_path / f'time_alignment_led_array_{self.led_array_id}.txt'
        if not report_file.exists():
            self._write_time_alignment_report(report_file, self._time_maps, all_time_arrays, common_times, tol)

    def _write_time_alignment_report(
        self,
        path: Path,
        time_maps: List[Dict[int, float]],
        all_time_arrays: List[np.ndarray],
        common_times: np.ndarray,
        tol: float,
    ) -> None:
        """
        Write a plain-text time-alignment report.

        Includes a per-image table showing, for every common time step, which
        image ID was selected from each simulation and the corresponding time
        difference.

        :param path: Destination file path.
        :param time_maps: ``{img_id: time}`` per simulation.
        :param all_time_arrays: Flat time arrays per simulation.
        :param common_times: Intersected common time axis.
        :param tol: Matching tolerance used [s].
        """
        self.config.output_path.mkdir(parents=True, exist_ok=True)
        sim_indices = sorted(self.led_array_mapping)

        lines = [
            f'Time-alignment report — led_array_id {self.led_array_id}',
            f'Generated : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}',
            f'Tolerance : {tol} s',
            '',
            'Simulations',
            '-----------',
        ]

        # NOTE(review): led_array_mapping.values() iterates in dict insertion
        # order (token order in the config file), while sim_indices and
        # all_time_arrays follow sorted sim_idx order.  These only line up if
        # the [led_arrays] tokens are listed in ascending sim order — verify,
        # or the reported arr_id may be paired with the wrong simulation.
        for sim_idx, arr_id, times in zip(
            sim_indices, self.led_array_mapping.values(), all_time_arrays
        ):
            sim_path = self.config.get_simulation_path(sim_idx)
            t_min = float(times.min()) if len(times) else float('nan')
            t_max = float(times.max()) if len(times) else float('nan')
            lines += [
                f'  sim {sim_idx} (led_array_id={arr_id})',
                f'    path   : {sim_path}',
                f'    images : {len(times)}',
                f'    range  : {t_min:.1f} s – {t_max:.1f} s',
            ]

        lines += [
            '',
            'Alignment result',
            '----------------',
            f'  Common images : {len(common_times)}',
        ]
        if len(common_times):
            lines += [
                f'  Time range    : {float(common_times[0]):.1f} s – {float(common_times[-1]):.1f} s',
            ]

        lines += ['', 'Per-simulation match rate', '-------------------------']
        for sim_idx, times in zip(sim_indices, all_time_arrays):
            matched = sum(1 for t in times if np.any(np.abs(common_times - t) <= tol))
            pct = 100.0 * matched / len(times) if len(times) else 0.0
            lines.append(f'  sim {sim_idx} : {matched}/{len(times)} images matched ({pct:.1f} %)')

        # ------------------------------------------------------------------
        # Per-image matching table
        # ------------------------------------------------------------------
        # Build reverse map: time → img_id for each simulation
        rev_maps: List[Dict[float, int]] = [
            {t: img_id for img_id, t in tm.items()} for tm in time_maps
        ]

        # Column widths
        col_t_common = 14   # "common t / s"
        col_per_sim = 26    # "img_id  t_sim / s  Δt / s" per simulation

        # Header
        sim_headers = ''.join(
            f'  {"sim " + str(si):^{col_per_sim}}'
            for si in sim_indices
        )
        sub_headers = ''.join(
            f'  {"img_id":>8} {"t_sim / s":>10} {"Δt / s":>6}'
            for _ in sim_indices
        )
        separator = '-' * (col_t_common + len(sim_indices) * (col_per_sim + 2))

        lines += [
            '',
            'Per-image match table',
            '---------------------',
            f'  {"common t / s":>{col_t_common}}' + sim_headers,
            f'  {" " * col_t_common}' + sub_headers,
            '  ' + separator,
        ]

        for t_common in common_times:
            row = f'  {t_common:>{col_t_common}.2f}'
            for tm in time_maps:
                best_id = self._nearest_img_id(tm, t_common, tol)
                if best_id is not None:
                    t_sim = tm[best_id]
                    dt = t_sim - t_common
                    row += f'  {best_id:>8d} {t_sim:>10.2f} {dt:>+6.2f}'
                else:
                    row += f'  {"—":>8} {"—":>10} {"—":>6}'
            lines.append(row)

        path.write_text('\n'.join(lines) + '\n')
        print(f'    Time-alignment report: {path}')

    # ------------------------------------------------------------------
    # Time-aligned image iteration
    # ------------------------------------------------------------------

    def _iter_aligned_images(self, reference_property: str):
        """
        Yield ``(virtual_img_id, stacked_intensity_vector)`` for every
        time-aligned image set, using the pre-computed :attr:`common_times`
        and :attr:`_time_maps`.

        Time steps for which any simulation has no image within tolerance,
        or whose image data is missing, are skipped with a warning (their
        virtual_img_id is consumed, leaving a gap in the numbering).
        """
        tol = self.config.time_sync_tolerance

        for v_img_id, t_common in enumerate(self.common_times, start=1):
            blocks: List[np.ndarray] = []
            skip = False
            for sub, time_map in zip(self._sub_solvers, self._time_maps):
                orig_id = self._nearest_img_id(time_map, t_common, tol)
                if orig_id is None:
                    print(
                        f'    Warning: no image within {tol} s of t={t_common:.2f} s '
                        f'in simulation at {sub.experiment.path} — skipping time step.'
                    )
                    skip = True
                    break
                try:
                    img_slice = sub.calculated_img_data.loc[orig_id]
                except KeyError:
                    print(
                        f'    Warning: img_id={orig_id} missing in calculated_img_data '
                        f'for {sub.experiment.path} — skipping time step.'
                    )
                    skip = True
                    break
                blocks.append(img_slice[reference_property].to_numpy())

            if skip:
                continue
            yield v_img_id, np.concatenate(blocks)

    # ------------------------------------------------------------------
    # Save
    # ------------------------------------------------------------------

    def save(self) -> None:
        """
        Write computed extinction coefficients to CSV.

        Output path::

            <output_path>/analysis/extinction_coefficients/<solver>/
                extinction_coefficients_<solver>_channel_<ch>_<ref_prop>_led_array_<id>.csv

        NOTE(review): np.savetxt prefixes every header line with '# '
        (default ``comments='#'``) — confirm downstream readers of the
        single-camera CSVs expect the same framing.

        :raises RuntimeError: if no coefficients have been computed yet.
        """
        if not self.coefficients_per_image_and_layer:
            raise RuntimeError('No coefficients computed yet. Call calc_and_set_coefficients first.')

        out_path = self._output_dir()
        out_path.mkdir(parents=True, exist_ok=True)
        out_file = (
            out_path
            / f'extinction_coefficients_{self.solver}_channel_{self.channel}'
              f'_{self.config.reference_property}_led_array_{self.led_array_id}.csv'
        )

        layers = self.layers
        sim_info = ' | '.join(
            f'sim{idx}:arr{arr}' for idx, arr in sorted(self.led_array_mapping.items())
        )
        header = (
            f'Stacked inversion | {sim_info} | solver: {self.solver} | '
            f'ref_prop: {self.config.reference_property} | '
            f'LEDSA {version("ledsa")}\n'
        )
        header += 'Layer_bottom_height[m],' + ','.join(map(str, layers.borders[:-1])) + '\n'
        header += 'Layer_top_height[m],' + ','.join(map(str, layers.borders[1:])) + '\n'
        header += 'Layer_center_height[m],' + ','.join(
            f'{c:.4f}' for c in (layers.borders[:-1] + layers.borders[1:]) / 2
        ) + '\n'
        header += 'Experiment_Time[s],layer0'
        header += ''.join(f',layer{i + 1}' for i in range(layers.amount - 1))

        # Fall back to a synthetic 0..N-1 axis if alignment never ran.
        times = (
            self.common_times
            if self.common_times is not None
            else np.arange(len(self.coefficients_per_image_and_layer), dtype=float)
        )
        data = np.column_stack((times, self.coefficients_per_image_and_layer))
        np.savetxt(out_file, data, delimiter=',', header=header)
        print(f'Saved: {out_file}')

    def __str__(self) -> str:
        sim_info = ', '.join(
            f'sim{idx}(arr{arr})' for idx, arr in sorted(self.led_array_mapping.items())
        )
        return (
            f'StackedExtinctionCoefficients | led_array_id={self.led_array_id} | '
            f'channel={self.channel} | solver={self.solver} | '
            f'simulations=[{sim_info}] | '
            f'n_leds={sum(e.num_leds for e in self.sub_experiments if e.num_leds is not None)} | '
            f'n_layers={self.layers.amount}\n'
        )

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    def _output_dir(self) -> Path:
        # One subdirectory per solver so linear/nonlinear runs never collide.
        return (
            self.config.output_path
            / 'analysis'
            / 'extinction_coefficients'
            / self.solver
        )

    @staticmethod
    def _load_time_map(experiment: Experiment, average_images: bool) -> Dict[int, float]:
        # Map img_id -> experiment time [s].  img_ids are assumed to be the
        # 1-based row order of the image-info CSV — TODO confirm this matches
        # the ids used in calculated_img_data.
        info_file = (
            'image_infos_analysis_avg.csv' if average_images else 'image_infos_analysis.csv'
        )
        df = pd.read_csv(experiment.path / 'analysis' / info_file)
        return {int(i + 1): float(t) for i, t in enumerate(df['Experiment_Time[s]'])}

    @staticmethod
    def _intersect_time_axes(time_arrays: List[np.ndarray], tol: float) -> np.ndarray:
        # Keep only times from the first axis that have a match (within tol)
        # in every other axis.  The first simulation's timestamps define the
        # common axis.
        if not time_arrays:
            return np.array([])
        common = time_arrays[0]
        for times in time_arrays[1:]:
            mask = np.array([np.any(np.abs(times - t) <= tol) for t in common])
            common = common[mask]
        return common

    @staticmethod
    def _nearest_img_id(time_map: Dict[int, float], target: float, tol: float) -> Optional[int]:
        # Linear scan for the closest timestamp; returns None when even the
        # best match exceeds the tolerance.
        best_id, best_diff = None, float('inf')
        for img_id, t in time_map.items():
            diff = abs(t - target)
            if diff < best_diff:
                best_diff = diff
                best_id = img_id
        return best_id if best_diff <= tol else None
run_analysis_arguments_with_extinction_coefficient + run_analysis_arguments, run_analysis_arguments_with_extinction_coefficient, run_stacked_analysis def main(argv: List[str]) -> None: @@ -40,9 +40,10 @@ def main(argv: List[str]) -> None: run_data_extraction_arguments(args) run_analysis_arguments(args) run_analysis_arguments_with_extinction_coefficient(args) + run_stacked_analysis(args) run_testing_arguments(args) if __name__ == "__main__": - main(sys.argv[1:]) + main(sys.argv[1:]) \ No newline at end of file diff --git a/ledsa/core/parser_arguments_declaration.py b/ledsa/core/parser_arguments_declaration.py index 922ccb1..f5eb885 100644 --- a/ledsa/core/parser_arguments_declaration.py +++ b/ledsa/core/parser_arguments_declaration.py @@ -106,4 +106,15 @@ def add_parser_argument_analysis(parser: argparse.ArgumentParser) -> argparse.Ar 'the reference property is not already color corrected.') parser.add_argument('--cc_channels', default=[0, 1, 2], action='extend', nargs="+", type=int, help='Channels, to which color correcten gets applied. Default 0 1 2') - return parser + parser.add_argument('-conf_s', '--config_stacked', nargs='?', const='config_stacked.ini', default=None, + metavar='FILENAME', + help='Create a template config_stacked.ini for multi-camera stacked analysis. ' + 'Optional argument: output filename (default: config_stacked.ini). ' + 'Use --n_simulations to control the number of simulation blocks.') + parser.add_argument('--n_simulations', type=int, default=2, + help='Number of simulation/camera blocks written into the template ' + 'config_stacked.ini (used with -conf_s). 
Default: 2.') + parser.add_argument('-as', '--analysis_stacked', action='store_true', + help='Run stacked multi-camera extinction coefficient calculation ' + 'using config_stacked.ini in the current directory.') + return parser \ No newline at end of file diff --git a/ledsa/core/parser_arguments_run.py b/ledsa/core/parser_arguments_run.py index 6dcaa93..76ec77b 100644 --- a/ledsa/core/parser_arguments_run.py +++ b/ledsa/core/parser_arguments_run.py @@ -6,8 +6,10 @@ from ledsa.analysis import ExtinctionCoefficientsLinear as ECA from ledsa.analysis.ConfigDataAnalysis import ConfigDataAnalysis +from ledsa.analysis.ConfigDataStacked import ConfigDataStacked from ledsa.analysis.Experiment import Experiment from ledsa.analysis.ExperimentData import ExperimentData +from ledsa.analysis.StackedExtinctionCoefficients import StackedExtinctionCoefficients from ledsa.analysis.data_preparation import apply_color_correction import numpy as np @@ -177,6 +179,56 @@ def run_analysis_arguments(args) -> None: ex_data = ExperimentData() apply_cc_on_ref_property(ex_data, args.cc_channels) + if args.config_stacked is not None: + ConfigDataStacked.create_template( + filename=args.config_stacked, + num_simulations=args.n_simulations, + ) + + +def run_stacked_analysis(args) -> None: + """ + Run stacked multi-camera extinction coefficient calculation. + + Reads ``config_stacked.ini`` from the current working directory, then + iterates over every virtual LED-array label and colour channel defined + therein, computing and saving the joint extinction coefficients. + + :param args: Parsed command line arguments. + :type args: argparse.Namespace + """ + if not args.analysis_stacked: + return + + try: + open('config_stacked.ini') + except FileNotFoundError: + raise FileNotFoundError( + 'config_stacked.ini not found in working directory! 
' + 'Create a template with: python -m ledsa -conf_s' + ) + + cfg = ConfigDataStacked() + + labels = cfg.get_led_array_ids() + channels = cfg.camera_channels + print( + f'Stacked analysis: {len(labels)} LED array(s) × ' + f'{len(channels)} channel(s), solver={cfg.solver!r}' + ) + + for led_array_id in labels: + for channel in channels: + print(f'\n── array={led_array_id} channel={channel} ──') + sec = StackedExtinctionCoefficients(cfg, led_array_id, channel) + print(sec) + if cfg.num_cores > 1: + print(f'Running on {cfg.num_cores} cores.') + sec.calc_and_set_coefficients_mp(cfg.num_cores) + else: + sec.calc_and_set_coefficients() + sec.save() + def run_analysis_arguments_with_extinction_coefficient(args) -> None: """ @@ -185,7 +237,6 @@ def run_analysis_arguments_with_extinction_coefficient(args) -> None: :param args: Parsed command line arguments :type args: argparse.Namespace """ - run_analysis_arguments(args) if args.analysis: extionction_coefficient_calculation(args) @@ -246,4 +297,4 @@ def extionction_coefficient_calculation(args) -> None: eca.save() print(f"{out_file} created!") else: - print(f"{out_file} already exists!") + print(f"{out_file} already exists!") \ No newline at end of file From f6fa96eec5df23ce85144d4a076b21c25b84a184 Mon Sep 17 00:00:00 2001 From: Kristian Date: Thu, 19 Mar 2026 21:55:41 +0100 Subject: [PATCH 3/7] bugfixing --- ledsa/analysis/ExtinctionCoefficients.py | 15 +++- ledsa/core/file_handling.py | 103 ++++++++++++----------- 2 files changed, 66 insertions(+), 52 deletions(-) diff --git a/ledsa/analysis/ExtinctionCoefficients.py b/ledsa/analysis/ExtinctionCoefficients.py index dda2f08..8f7a413 100644 --- a/ledsa/analysis/ExtinctionCoefficients.py +++ b/ledsa/analysis/ExtinctionCoefficients.py @@ -10,6 +10,7 @@ from importlib.metadata import version + class ExtinctionCoefficients(ABC): """ Parent class for the calculation of the Extinction Coefficients. 
@@ -103,15 +104,20 @@ def calc_and_set_coefficients_mp(self, cores=4) -> None: pool.close() self.coefficients_per_image_and_layer = sigmas - def set_all_member_variables(self) -> None: + def set_all_member_variables(self, save_distances: bool = True) -> None: """ Calculate distance traveled per layer, for every led, load image data from binary file and calculate reference intensities for each LED + :param save_distances: If True, write the distance matrix to a text file + in the current working directory. Pass False when calling from a + stacked context that manages its own distance file. + :type save_distances: bool """ if len(self.distances_per_led_and_layer) == 0: self.distances_per_led_and_layer = self.calc_distance_array() - file_name = f'led_array_{self.experiment.led_array}_distances_per_layer.txt' - np.savetxt(file_name, self.distances_per_led_and_layer) + if save_distances: + file_name = f'led_array_{self.experiment.led_array}_distances_per_layer.txt' + np.savetxt(file_name, self.distances_per_led_and_layer) if self.calculated_img_data.empty: self.load_img_data() if self.ref_intensities.shape[0] == 0: @@ -152,7 +158,7 @@ def save(self) -> None: header += 'Experiment_Time[s],' header += 'layer0' header += ''.join(f',layer{i + 1}' for i in range(self.experiment.layers.amount - 1)) - + experiment_times = _get_experiment_times_from_image_infos_file(self.average_images) coefficients_per_time_and_layer = np.column_stack( (experiment_times, self.coefficients_per_image_and_layer)) @@ -244,6 +250,7 @@ def multiindex_series_to_nparray(multi_series: pd.Series) -> np.ndarray: array[i] = multi_series.loc[i + 1] return array + def _get_experiment_times_from_image_infos_file(average_images): if average_images == True: image_infos_file = 'analysis/image_infos_analysis_avg.csv' diff --git a/ledsa/core/file_handling.py b/ledsa/core/file_handling.py index 110b149..9f7aad9 100644 --- a/ledsa/core/file_handling.py +++ b/ledsa/core/file_handling.py @@ -136,7 +136,7 @@ def 
read_hdf(channel: int, path='.') -> pd.DataFrame: fit_parameters = pd.read_hdf(file_path, key='/table') except (FileNotFoundError, KeyError): # If file doesn't exist or neither key works, create new binary data - create_binary_data(channel) + create_binary_data(channel, path=path) fit_parameters = pd.read_hdf(file_path, key='channel_values') fit_parameters.set_index(['img_id', 'led_id'], inplace=True) @@ -217,61 +217,68 @@ def extend_hdf(channel: int, quantity: str, values: np.ndarray) -> None: fit_parameters.to_hdf(file, key='channel_values', format='table') -def create_binary_data(channel: int) -> None: +def create_binary_data(channel: int, path: str = '.') -> None: """ Creates binary file from the CSV files for a specified channel and writes to an HDF file. :param channel: Channel number for which binary data is to be created. :type channel: int + :param path: Root directory of the LEDSA simulation. Defaults to the current directory. + :type path: str """ - config = ConfigData() - columns = _get_column_names(channel) - - fit_params_list = [] - - fit_params = pd.DataFrame(columns=columns) - - # find time and fit parameter for every image - - first_img_id = int(config['analyse_photo']['first_img_analysis_id']) - last_img_id = int(config['analyse_photo']['last_img_analysis_id']) - - if config['DEFAULT']['num_img_overflow'] != 'None': - max_id = int(config['DEFAULT']['num_img_overflow']) - else: - max_id = 10 ** 7 - number_of_images = (max_id + last_img_id - first_img_id) % max_id + 1 - number_of_images //= int(config['analyse_photo']['num_skip_imgs']) + 1 - print('Loading fit parameters...') - exception_counter = 0 - for image_id in range(1, number_of_images + 1): - try: - in_file_path = os.path.join('analysis', f'channel{channel}', f'{image_id}_led_positions.csv') - parameters = ledsa.core.file_handling.read_table(in_file_path, delim=',', silent=True) - except (FileNotFoundError, IOError): - fit_params_fragment = fit_params.append( - 
_param_array_to_dataframe([[np.nan] * (fit_params.shape[1] - 1)], image_id, - columns), - ignore_index=True, sort=False) + prev_dir = os.getcwd() + try: + os.chdir(path) + config = ConfigData() + columns = _get_column_names(channel) + + fit_params_list = [] + + fit_params = pd.DataFrame(columns=columns) + + # find time and fit parameter for every image + + first_img_id = int(config['analyse_photo']['first_img_analysis_id']) + last_img_id = int(config['analyse_photo']['last_img_analysis_id']) + + if config['DEFAULT']['num_img_overflow'] != 'None': + max_id = int(config['DEFAULT']['num_img_overflow']) + else: + max_id = 10 ** 7 + number_of_images = (max_id + last_img_id - first_img_id) % max_id + 1 + number_of_images //= int(config['analyse_photo']['num_skip_imgs']) + 1 + print('Loading fit parameters...') + exception_counter = 0 + for image_id in range(1, number_of_images + 1): + try: + in_file_path = os.path.join('analysis', f'channel{channel}', f'{image_id}_led_positions.csv') + parameters = ledsa.core.file_handling.read_table(in_file_path, delim=',', silent=True) + except (FileNotFoundError, IOError): + fit_params_fragment = fit_params.append( + _param_array_to_dataframe([[np.nan] * (fit_params.shape[1] - 1)], image_id, + columns), + ignore_index=True, sort=False) + fit_params_list.append(fit_params_fragment) + exception_counter += 1 + continue + + parameters = parameters[parameters[:, 0].argsort()] # sort for led_id + parameters = _append_coordinates(parameters) + fit_params_fragment = _param_array_to_dataframe(parameters, image_id, columns) fit_params_list.append(fit_params_fragment) - exception_counter += 1 - continue - - parameters = parameters[parameters[:, 0].argsort()] # sort for led_id - parameters = _append_coordinates(parameters) - fit_params_fragment = _param_array_to_dataframe(parameters, image_id, columns) - fit_params_list.append(fit_params_fragment) - fit_params = pd.concat(fit_params_list, ignore_index=True, sort=False) + fit_params = 
pd.concat(fit_params_list, ignore_index=True, sort=False) - print(f'{number_of_images - exception_counter} of {number_of_images} loaded.') - fit_params['img_id'] = fit_params['img_id'].astype(int) - fit_params['led_id'] = fit_params['led_id'].astype(int) - fit_params['led_array_id'] = fit_params['led_array_id'].astype(int) - fit_params['max_col_val'] = fit_params['max_col_val'].astype(int) - fit_params['sum_col_val'] = fit_params['sum_col_val'].astype(int) - out_file_path = os.path.join('analysis', f'channel{channel}', 'all_parameters.h5') - fit_params.to_hdf(out_file_path, key='channel_values', format='table', append=True) + print(f'{number_of_images - exception_counter} of {number_of_images} loaded.') + fit_params['img_id'] = fit_params['img_id'].astype(int) + fit_params['led_id'] = fit_params['led_id'].astype(int) + fit_params['led_array_id'] = fit_params['led_array_id'].astype(int) + fit_params['max_col_val'] = fit_params['max_col_val'].astype(int) + fit_params['sum_col_val'] = fit_params['sum_col_val'].astype(int) + out_file_path = os.path.join('analysis', f'channel{channel}', 'all_parameters.h5') + fit_params.to_hdf(out_file_path, key='channel_values', format='table', append=True) + finally: + os.chdir(prev_dir) def _get_column_names(channel: int) -> List[str]: """ @@ -397,4 +404,4 @@ def _append_coordinates_to_params(params: np.ndarray, coord: np.ndarray) -> np.n coord = np.reshape(coord[mask], (params.shape[0], coord.shape[1])) p_with_c[:, -2:] = coord[:, -2:] - return p_with_c + return p_with_c \ No newline at end of file From 19a66a0bc3ad42ddfd23d91fd3d912c716a7e04f Mon Sep 17 00:00:00 2001 From: Kristian Date: Thu, 19 Mar 2026 21:57:11 +0100 Subject: [PATCH 4/7] add acceptance tests for stacked analysis --- .../06_test_stacked_analysis.robot | 55 +++++ .../AcceptanceTests/LedsaATestLibrary.py | 194 +++++++++++++++++- 2 files changed, 243 insertions(+), 6 deletions(-) create mode 100644 ledsa/tests/AcceptanceTests/06_test_stacked_analysis.robot diff --git 
a/ledsa/tests/AcceptanceTests/06_test_stacked_analysis.robot b/ledsa/tests/AcceptanceTests/06_test_stacked_analysis.robot new file mode 100644 index 0000000..b259daf --- /dev/null +++ b/ledsa/tests/AcceptanceTests/06_test_stacked_analysis.robot @@ -0,0 +1,55 @@ +*** Settings *** +Resource global_keywords.resource + +Force Tags analysis stacked + +*** Test Cases *** +Step Stacked Analysis Linear Solver + Create Stacked Test Data + Setup Camera Simulation cam0 0.5 + Setup Camera Simulation cam1 2.5 + Change Directory ${WORKDIR}${/}stacked + Create And Fill Config Stacked cam0 cam1 + Execute Ledsa -as + Plot Stacked Extinction Coefficients linear + Check Stacked Extinction Coefficient Results linear + + +*** Keywords *** +Create Stacked Test Data + Log Creating stacked test data for two cameras at different heights + Change Directory ${WORKDIR} + Create Test Data Stacked + +Setup Camera Simulation + [Arguments] ${cam_dir} ${cam_z} + Log Setting up camera simulation in ${cam_dir} with camera height z=${cam_z} + Change Directory ${WORKDIR}${/}stacked${/}${cam_dir} + Create And Fill Config For Stacked Cam ${cam_z} + Execute Ledsa -s1 + Execute Ledsa -s2 + Execute Ledsa --coordinates + Execute Ledsa -s3_fast + +Plot Stacked Extinction Coefficients + [Arguments] ${solver} + Log Plotting stacked extinction coefficients with ${solver} solver + Change Directory ${WORKDIR}${/}stacked + Plot Stacked Input Vs Computed Extinction Coefficients ${solver} + +Check Stacked Extinction Coefficient Results + [Arguments] ${solver} + Log Checking stacked extinction coefficient results for ${solver} solver + Check Stacked Results 2 ${solver} + Check Stacked Results 3 ${solver} + Check Stacked Results 4 ${solver} + +Check Stacked Results + [Arguments] ${image_id} ${solver} + Log Checking stacked results for image ${image_id} + ${rmse} = Check Stacked Input Vs Computed ${image_id} ${solver} + Stacked Rmse Should Be Small ${rmse} + +Stacked Rmse Should Be Small + [Arguments] ${rmse} + Should 
Be True ${rmse} < 0.05 diff --git a/ledsa/tests/AcceptanceTests/LedsaATestLibrary.py b/ledsa/tests/AcceptanceTests/LedsaATestLibrary.py index 1d62539..d616b12 100644 --- a/ledsa/tests/AcceptanceTests/LedsaATestLibrary.py +++ b/ledsa/tests/AcceptanceTests/LedsaATestLibrary.py @@ -12,6 +12,7 @@ from TestExperiment import TestExperiment, Layers, Camera from ledsa.analysis.ConfigDataAnalysis import ConfigDataAnalysis +from ledsa.analysis.ConfigDataStacked import ConfigDataStacked from ledsa.core.ConfigData import ConfigData @@ -66,6 +67,187 @@ def extco_quad(z): ex.set_extinction_coefficients(extinction_coefficients) create_test_image(image_id, ex) + # ------------------------------------------------------------------ + # Stacked / multi-camera keywords + # ------------------------------------------------------------------ + + @keyword + def create_test_data_stacked(self, num_of_leds=100, num_of_layers=20, bottom_border=0, top_border=3): + """Create test images for two cameras at different heights. 
+ + Directory layout created under the current working directory:: + + stacked/ + test_data/ ← ground-truth extinction coefficient CSVs + cam0/test_data/ ← images as seen from the lower camera + cam1/test_data/ ← images as seen from the upper camera + """ + z_range = np.linspace(bottom_border, top_border, num_of_layers) + + EPS = 1e-10 + SIGMA_NOISE = 0.05 + + def _add_noise(x, *, sigma=SIGMA_NOISE): + return np.clip(x + normal(0, sigma, x.shape), a_min=0.0, a_max=None) + EPS + + profiles = [ + np.full_like(z_range, EPS), # image 1 – reference (near-zero) + _add_noise(np.full_like(z_range, 0.2)), # image 2 – constant + _add_noise(0.15 * z_range), # image 3 – linear + _add_noise(0.0435 * z_range ** 2), # image 4 – quadratic + ] + + # Save ground-truth extinction coefficients once (shared by both cameras) + gt_dir = os.path.join('stacked', 'test_data') + os.makedirs(gt_dir, exist_ok=True) + for image_id, extco in enumerate(profiles): + np.savetxt( + os.path.join(gt_dir, f'test_extinction_coefficients_input_{image_id + 1}.csv'), + extco, + ) + + # Two cameras at different heights view the same LED array + cameras = [ + ('cam0', Camera(0, 0, 0.5)), # lower camera + ('cam1', Camera(0, 0, 2.5)), # upper camera + ] + layers = Layers(num_of_layers, bottom_border, top_border) + + for cam_name, camera in cameras: + img_dir = os.path.join('stacked', cam_name, 'test_data') + os.makedirs(img_dir, exist_ok=True) + for image_id, extco in enumerate(profiles): + ex = TestExperiment(camera=camera, layers=layers) + for z in np.linspace(bottom_border + 0.05, top_border - 0.05, num_of_leds): + ex.add_led(0, 4, z) + ex.set_extinction_coefficients(extco) + create_test_image(image_id, ex, img_dir=img_dir) + + @keyword + def create_and_fill_config_for_stacked_cam(self, cam_z, first=1, last=4): + """Create config.ini in the current directory for one camera simulation. 
+ + The camera height *cam_z* is only needed for the LEDSA coordinate step; + it is not stored in config.ini (that uses the physical LED array geometry). + """ + conf = ConfigData( + load_config_file=False, + img_directory='test_data/', + search_area_radius=10, + pixel_value_percentile=99.875, + channel=0, + max_num_leds=1000, + num_arrays=1, + num_cores=1, + date=None, + start_time=None, + time_ref_img_id=None, + time_ref_img_time=None, + time_diff_to_image_time=0, + img_name_string='test_img_{}.jpg', + num_img_overflow=None, + first_img_experiment_id=first, + last_img_experiment_id=last, + ref_img_id=1, + ignore_led_indices=None, + led_array_edge_indices=None, + led_array_edge_coordinates=None, + first_img_analysis_id=first, + last_img_analysis_id=last, + num_skip_imgs=0, + num_skip_leds=0, + merge_led_array_indices=None, + ) + conf.set('analyse_positions', ' led_array_edge_indices', '49 0') + conf.set('analyse_positions', ' led_array_edge_coordinates', '0 4 0.05 0 4 2.95') + conf.set('DEFAULT', ' date', '2018:11:27') + conf.save() + + @keyword + def create_and_fill_config_stacked(self, cam0_dir, cam1_dir): + """Create config_stacked.ini in the current directory. + + *cam0_dir* and *cam1_dir* are paths (absolute or relative to the + current working directory) to the two camera simulation directories. + """ + cfg = ConfigDataStacked(load_config_file=False) + cfg['DEFAULT']['solver'] = 'linear' + cfg['DEFAULT']['lambda_reg'] = '1e-3' + cfg['DEFAULT']['reference_property'] = 'sum_col_val' + cfg['DEFAULT']['num_ref_images'] = '1' + cfg['DEFAULT']['ref_img_indices'] = 'None' + cfg['DEFAULT']['average_images'] = 'False' + cfg['DEFAULT']['camera_channels'] = '0' + cfg['DEFAULT']['num_cores'] = '1' + cfg['DEFAULT']['num_layers'] = '20' + cfg['DEFAULT']['domain_bounds'] = '0 3' + cfg['DEFAULT']['output_path'] = '.' 
+ cfg['DEFAULT']['time_sync_tolerance'] = '2' + + cfg['simulation_0'] = {} + cfg['simulation_0']['path'] = str(os.path.abspath(cam0_dir)) + cfg['simulation_0']['camera_position'] = '0 0 0.5' + + cfg['simulation_1'] = {} + cfg['simulation_1']['path'] = str(os.path.abspath(cam1_dir)) + cfg['simulation_1']['camera_position'] = '0 0 2.5' + + cfg['led_arrays'] = {} + cfg['led_arrays']['0'] = '0:0 1:0' + + cfg.save() + + @keyword + def plot_stacked_input_vs_computed_extinction_coefficients(self, solver='linear', first=2, last=4, led_array_id=0, channel=0): + """Plot input vs stacked-computed extinction coefficients for each test image.""" + filename = ( + f'extinction_coefficients_{solver}_channel_{channel}' + f'_sum_col_val_led_array_{led_array_id}.csv' + ) + result_path = os.path.join('analysis', 'extinction_coefficients', solver, filename) + data = np.loadtxt(result_path, delimiter=',', skiprows=4) + times = data[:, 0] + extco_computed = data[:, 1:] + num_of_layers = extco_computed.shape[1] + + os.makedirs('results', exist_ok=True) + for image_id in range(first, last + 1): + gt_path = os.path.join('test_data', f'test_extinction_coefficients_input_{image_id}.csv') + extco_input = np.loadtxt(gt_path) + row = image_id - 1 + + plt.figure() + plt.plot(extco_input, range(num_of_layers), '.-', label='Input') + plt.plot(extco_computed[row, :], range(num_of_layers), '.-', label='Stacked computed') + plt.xlabel(r'Extinction coefficient / m$^{-1}$') + plt.ylabel('Layer / -') + plt.title(f'Stacked {solver} — image {image_id}, t = {times[row]:.0f} s') + plt.xlim(-0.05, 0.6) + plt.ylim(-1, num_of_layers) + plt.grid(linestyle='--', alpha=0.5) + plt.legend() + plt.tight_layout() + plt.savefig(os.path.join('results', f'stacked_image_{image_id}_{solver}.pdf')) + plt.close() + + @keyword + def check_stacked_input_vs_computed(self, image_id, solver='linear', led_array_id=0, channel=0): + """Return the RMSE between the stacked reconstruction and the ground truth.""" + filename = ( + 
f'extinction_coefficients_{solver}_channel_{channel}' + f'_sum_col_val_led_array_{led_array_id}.csv' + ) + result_path = os.path.join('analysis', 'extinction_coefficients', solver, filename) + data = np.loadtxt(result_path, delimiter=',', skiprows=4) + extco_computed = data[:, 1:] # first column is experiment time + + gt_path = os.path.join('test_data', f'test_extinction_coefficients_input_{image_id}.csv') + extco_input = np.loadtxt(gt_path) + + row = int(image_id) - 1 + rmse = np.sqrt(np.mean((extco_input - extco_computed[row, :]) ** 2)) + return rmse + @keyword def plot_input_vs_computed_extinction_coefficients(self, solver, first=1, last=4, led_array=0, channel=0): time, extinction_coefficients_computed = load_extinction_coefficients_computed(solver, channel, led_array) @@ -157,15 +339,15 @@ def load_extinction_coefficients_computed(solver, channel, led_array): extinction_coefficients_computed = data[:, 1:] return time, extinction_coefficients_computed -def create_test_image(image_id, experiment): +def create_test_image(image_id, experiment, img_dir='test_data'): """ Creates three test images with black and gray pixels representing 3 leds and sets the exif data needed The first image has 100% transmission on all LEDs, the second image has 50% transmission on all LEDs, the third has 50%, 70% and 80% transmission on the top, middle and bottom LEDs. 
:return: None """ - # Create test_data directory if it doesn't exist - if not os.path.exists('test_data'): - os.makedirs('test_data') + # Create img_dir directory if it doesn't exist + if not os.path.exists(img_dir): - os.makedirs('test_data') + os.makedirs(img_dir) num_of_leds = len(experiment.leds) transmissions = experiment.calc_all_led_transmissions() @@ -175,7 +357,7 @@ def create_test_image(image_id, experiment): img = Image.fromarray(img_array, 'RGB') # Save image without EXIF data - out = os.path.join("test_data", f"test_img_{image_id + 1}.jpg") + out = os.path.join(img_dir, f"test_img_{image_id + 1}.jpg") img.save(out) # Add EXIF data to the image afterward @@ -234,4 +416,4 @@ def is_tqdm_output(text): :return: True if the text appears to be from tqdm, False otherwise """ - return "Processing images" in text + return "Processing images" in text \ No newline at end of file From 640414177de05d1edaeb27f11028c0345a2d8461 Mon Sep 17 00:00:00 2001 From: Kristian Date: Thu, 19 Mar 2026 22:13:54 +0100 Subject: [PATCH 5/7] adapt post-processing for stacked analysis data --- ledsa/postprocessing/simulation.py | 736 +++++++++++++++++------------ 1 file changed, 428 insertions(+), 308 deletions(-) diff --git a/ledsa/postprocessing/simulation.py b/ledsa/postprocessing/simulation.py index 5d1e28d..718f4d6 100644 --- a/ledsa/postprocessing/simulation.py +++ b/ledsa/postprocessing/simulation.py @@ -7,25 +7,259 @@ from ledsa.core.image_reading import read_channel_data_from_img from ledsa.core.ConfigData import ConfigData from ledsa.analysis.ConfigDataAnalysis import ConfigDataAnalysis +from ledsa.analysis.ConfigDataStacked import ConfigDataStacked -class SimData: - def __init__(self, path_simulation: str, read_all=True, remove_duplicates=False, load_config_params=True): +# ────────────────────────────────────────────────────────────────────────────── +# Shared base +# ────────────────────────────────────────────────────────────────────────────── + +class _BaseSimData: + """ + Shared base for 
:class:`SimData` and :class:`StackedSimData`. + + Holds layer geometry, the combined extinction-coefficient DataFrame, and all + query methods. Subclasses are responsible for populating the attributes in + their ``__init__`` and for implementing :meth:`_get_extco_df_from_path`. + """ + + def __init__(self): + self.all_extco_df = None + self.solver = None + self.camera_channels = [0] + self.num_ref_images = None + self.n_layers = None + self.lower_domain_bound = None + self.upper_domain_bound = None + self.layer_bottom_heights = None + self.layer_top_heights = None + self.layer_center_heights = None + + # ------------------------------------------------------------------ + # Layer-geometry helpers + # ------------------------------------------------------------------ + + def _set_layer_geometry(self, domain_bounds, n_layers): + """Compute and store layer border and centre arrays.""" + self.n_layers = n_layers + self.lower_domain_bound = domain_bounds[0] + self.upper_domain_bound = domain_bounds[1] + borders = [ + (i / n_layers) * (domain_bounds[1] - domain_bounds[0]) + domain_bounds[0] + for i in range(n_layers + 1) + ] + self.layer_bottom_heights = np.round(np.array(borders[:-1]), 2) + self.layer_top_heights = np.round(np.array(borders[1:]), 2) + self.layer_center_heights = np.round( + (self.layer_bottom_heights + self.layer_top_heights) / 2, 2 + ) + + def layer_from_height(self, height): + """Return the layer index that contains *height* [m].""" + return int( + (height - self.lower_domain_bound) + / (self.upper_domain_bound - self.lower_domain_bound) + * self.n_layers + ) + + # ------------------------------------------------------------------ + # Smoothing + # ------------------------------------------------------------------ + + def _apply_moving_average(self, df, window, smooth='ma'): + """ + Apply a centred rolling window to *df*. + + If *window* is 1, the raw data is returned unchanged. 
Otherwise a + centred rolling window is applied using either mean (``'ma'``) or + median (``'median'``). + + :param df: Input DataFrame (time as index). + :type df: pd.DataFrame + :param window: Window width. A value of 1 returns *df* unchanged. + :type window: int + :param smooth: Smoothing method: ``'ma'`` for moving average or + ``'median'`` for rolling median. Defaults to ``'ma'``. + :type smooth: str + :return: Smoothed DataFrame of the same shape. + :rtype: pd.DataFrame + """ + if window == 1: + return df + roller = df.rolling(window=window, closed='right') + smoothed = roller.median() if smooth == 'median' else roller.mean() + return smoothed.shift(-int(window / 2) + 1) + + # ------------------------------------------------------------------ + # Time helpers + # ------------------------------------------------------------------ + + def get_closest_time(self, time): + """ + Return the index value in :attr:`all_extco_df` nearest to *time*. + + :param time: Target time [s]. + :type time: int or float + :return: Closest matching time index value. + """ + return self.all_extco_df.index[abs(self.all_extco_df.index - time).argmin()] + + def set_timeshift(self, timedelta): + """ + Shift the time axis of :attr:`all_extco_df` by *timedelta* seconds. + + :param timedelta: Time offset [s]. + :type timedelta: float + """ + if self.all_extco_df is not None: + self.all_extco_df.index += timedelta + + # ------------------------------------------------------------------ + # Extinction-coefficient queries + # ------------------------------------------------------------------ + + def get_extco_at_time(self, channel, time, yaxis='layer', height_reference='center', window=1, smooth='ma'): + """ + Extinction coefficients for all LED arrays at a single time step. + + :param channel: Colour channel index. + :type channel: int + :param time: Experiment time [s]. Use :meth:`get_closest_time` to snap + to the nearest available time. 
+ :param yaxis: Index type of the returned DataFrame. + ``'layer'`` returns integer layer indices; ``'height'`` returns + physical heights [m]. + :type yaxis: str + :param height_reference: Which layer border to use as the height + reference: ``'center'``, ``'top'``, or ``'bottom'``. + :type height_reference: str + :param window: Rolling-window size for temporal smoothing. 1 = no + smoothing. + :type window: int + :param smooth: Smoothing method: ``'ma'`` (mean) or ``'median'``. + :type smooth: str + :return: DataFrame indexed by layer / height; one column per LED array. + :rtype: pd.DataFrame + """ + ch_df = self.all_extco_df.xs(channel, level=0, axis=1) + ma_df = self._apply_moving_average(ch_df, window, smooth) + ma_df = ma_df.loc[time, :].reset_index().pivot(columns='LED Array', index='Layer') + ma_df.columns = ma_df.columns.droplevel() + ma_df.index = range(ma_df.shape[0]) + return self._set_extco_yaxis(ma_df, yaxis, height_reference) + + def get_extco_at_led_array(self, channel, led_array, yaxis='layer', height_reference='center', window=1, smooth='ma'): + """ + Time series of extinction coefficients for one LED array. + + *led_array* is an ``int`` for :class:`SimData` (LED array ID) and a + ``str`` for :class:`StackedSimData` (virtual array label, e.g. + ``'array_A'``). + + :param channel: Colour channel index. + :type channel: int + :param led_array: LED array identifier (int for SimData, str for StackedSimData). + :param yaxis: Column type of the returned DataFrame: + ``'layer'`` or ``'height'``. + :type yaxis: str + :param height_reference: ``'center'``, ``'top'``, or ``'bottom'``. + :type height_reference: str + :param window: Rolling-window size for temporal smoothing. + :type window: int + :param smooth: Smoothing method: ``'ma'`` or ``'median'``. + :type smooth: str + :return: DataFrame indexed by time; one column per layer / height. 
+ :rtype: pd.DataFrame + """ + ma_df = self._apply_moving_average( + self.all_extco_df.xs(channel, level=0, axis=1).xs(led_array, level=0, axis=1), + window, smooth, + ) + if yaxis == 'layer': + ma_df.columns.names = ['Layer'] + elif yaxis == 'height': + ma_df.columns = self._height_axis(height_reference) + return ma_df + + def get_extco_at_layer(self, channel, layer, window=1, smooth='ma'): + """ + Time series of extinction coefficients for a single layer. + + :param channel: Colour channel index. + :type channel: int + :param layer: Layer index (0 = bottom-most layer). + :type layer: int + :param window: Rolling-window size for temporal smoothing. + :type window: int + :param smooth: Smoothing method: ``'ma'`` or ``'median'``. + :type smooth: str + :return: DataFrame indexed by time; one column per LED array. + :rtype: pd.DataFrame + """ + ch_df = self.all_extco_df.xs(channel, level=0, axis=1).xs(layer, level=1, axis=1) + return self._apply_moving_average(ch_df, window, smooth) + + def get_extco_at_height(self, channel, height, window=1, smooth='ma'): + """ + Time series of extinction coefficients at a physical height. + + Converts *height* to a layer index via :meth:`layer_from_height` and + delegates to :meth:`get_extco_at_layer`. + + :param channel: Colour channel index. + :type channel: int + :param height: Target height [m]. + :type height: float + :param window: Rolling-window size for temporal smoothing. + :type window: int + :param smooth: Smoothing method: ``'ma'`` or ``'median'``. + :type smooth: str + :return: DataFrame indexed by time; one column per LED array. 
+ :rtype: pd.DataFrame + """ + return self.get_extco_at_layer(channel, self.layer_from_height(height), window, smooth) + + # ------------------------------------------------------------------ + # Internal helpers shared by query methods above + # ------------------------------------------------------------------ + + def _height_axis(self, height_reference): + if height_reference == 'center': + return self.layer_center_heights + elif height_reference == 'top': + return self.layer_top_heights + elif height_reference == 'bottom': + return self.layer_bottom_heights + raise ValueError("height_reference must be 'center', 'top', or 'bottom'") + + def _set_extco_yaxis(self, df, yaxis, height_reference): + if yaxis == 'layer': + df.index.names = ['Layer'] + elif yaxis == 'height': + df.index = self._height_axis(height_reference) + df.index.names = ['Height / m'] + return df + + +# ────────────────────────────────────────────────────────────────────────────── +# Single-camera simulation +# ────────────────────────────────────────────────────────────────────────────── + +class SimData(_BaseSimData): + def __init__(self, path_simulation, read_all=True, remove_duplicates=False, load_config_params=True): """ Initializes SimData with simulation and image analysis settings. :param path_simulation: Directory containing simulation data. :type path_simulation: str - :param path_images: Directory containing experimental images, defaults to None. - :type path_images: str, optional :param read_all: If True, reads all data on initialization, defaults to True. :type read_all: bool, optional :param remove_duplicates: If True, removes duplicate entries from analysis, defaults to False. :type remove_duplicates: bool, optional :param load_config_params: If True, loads parameters from config files, defaults to True. :type load_config_params: bool, optional - :raises ValueError: If path_images is not provided when required for image analysis. 
""" + super().__init__() self.led_params_timedelta = 0 self.ch0_ledparams_df = None self.ch1_ledparams_df = None @@ -33,113 +267,58 @@ def __init__(self, path_simulation: str, read_all=True, remove_duplicates=False, self.ch0_extcos = None self.ch1_extcos = None self.ch2_extcos = None - self.all_extco_df = None - self.solver = None self.average_images = False - self.camera_channels = [0] - self.num_ref_images = None - self.n_layers = None self.search_area_radius = None self.path_images = None - self.lower_domain_bound = None - self.upper_domain_bound = None - self.layer_bottom_heights = None - self.layer_top_heights = None - self.layer_center_heights = None if load_config_params: os.chdir(path_simulation) - # Read configuration from config.ini and config_analysis.ini self.config = ConfigData() self.config_analysis = ConfigDataAnalysis() - # Get parameters from config file self.search_area_radius = self.config['find_search_areas']['search_area_radius'] self.path_images = self.config['find_search_areas']['img_directory'] - # Get parameters from config_analysis file self.solver = self.config_analysis['DEFAULT']['solver'] self.average_images = self.config_analysis.getboolean('DEFAULT', 'average_images') self.camera_channels = self.config_analysis.get_list_of_values('DEFAULT', 'camera_channels', dtype=int) self.num_ref_images = int(self.config_analysis['DEFAULT']['num_ref_images']) - self.n_layers = int(self.config_analysis['model_parameters']['num_layers']) - domain_bounds = self.config_analysis.get_list_of_values('model_parameters', 'domain_bounds', dtype=float) - self.lower_domain_bound = domain_bounds[0] - self.upper_domain_bound = domain_bounds[1] - layer_heights = [(l / self.n_layers) * (self.upper_domain_bound - self.lower_domain_bound) + self.lower_domain_bound for l in - range(self.n_layers + 1)] - self.layer_bottom_heights = np.round(np.array(layer_heights[:-1]), 2) - self.layer_top_heights = np.round(np.array(layer_heights[1:]), 2) - self.layer_center_heights 
= np.round((self.layer_bottom_heights + self.layer_top_heights) / 2, 2) + self._set_layer_geometry( + self.config_analysis.get_list_of_values('model_parameters', 'domain_bounds', dtype=float), + int(self.config_analysis['model_parameters']['num_layers']), + ) self.path_simulation = path_simulation self.led_info_path = os.path.join(self.path_simulation, 'analysis', 'led_search_areas_with_coordinates.csv') - if self.average_images == True: - image_infos_file = 'analysis/image_infos_analysis_avg.csv' - else: - image_infos_file = 'analysis/image_infos_analysis.csv' + image_infos_file = ( + 'analysis/image_infos_analysis_avg.csv' + if self.average_images + else 'analysis/image_infos_analysis.csv' + ) self.image_info_df = pd.read_csv(os.path.join(self.path_simulation, image_infos_file)) - if read_all == True: + if read_all: self.read_all() - if remove_duplicates == True: + if remove_duplicates: self.remove_duplicate_heights() - layer_from_height = lambda self, height: int( - (height - self.lower_domain_bound) / (self.upper_domain_bound - self.lower_domain_bound) * self.n_layers) - - def _apply_moving_average(self, df: pd.DataFrame, window: int, smooth: str = 'ma') -> pd.DataFrame: - """ - Applies a moving average or median smoothing to a DataFrame over time. - - If window is 1, the raw data is returned without any smoothing. Otherwise, a centered - rolling window is applied using either mean or median. The window is centered by shifting - the result by half the window size, so that the smoothed value at time t is computed from - an equal number of past and future values. - - :param df: The input DataFrame to smooth. The index is assumed to represent time steps. - :type df: pd.DataFrame - :param window: The number of time steps to include in the rolling window. A value of 1 - returns the raw data unchanged. - :type window: int - :param smooth: The smoothing method to apply. Use 'ma' for moving average (mean) or - 'median' for rolling median. Defaults to 'ma'. 
- :type smooth: str, optional - :return: A DataFrame with the same shape as the input, either smoothed or unchanged if - window is 1. - :rtype: pd.DataFrame - """ - if window == 1: - return df - if smooth == 'median': - return df.rolling(window=window, closed='right').median().shift(-int(window / 2) + 1) - else: - return df.rolling(window=window, closed='right').mean().shift(-int(window / 2) + 1) - - def get_closest_time(self, time): - """ - Returns closest time index from self.all_extco_df - - :param time: Target time to find closest match for - :type time: int or float - :return: Closest matching time index - :rtype: int - """ - return self.all_extco_df.index[abs(self.all_extco_df.index - time).argmin()] - - def set_timeshift(self, timedelta: int): + # set_timeshift override: also updates LED-parameter time axis + def set_timeshift(self, timedelta): """ Sets the time shift for the experiment's start time. :param timedelta: Time shift in seconds. :type timedelta: int """ - if self.all_extco_df is not None: - self.all_extco_df.index += timedelta + super().set_timeshift(timedelta) self.led_params_timedelta = timedelta - def _get_ledparams_df_from_path(self, channel: int) -> pd.DataFrame: + # ------------------------------------------------------------------ + # Data loading + # ------------------------------------------------------------------ + + def _get_ledparams_df_from_path(self, channel): """ Reads experimental parameters from a binary HDF5 table based on color channel. @@ -148,12 +327,16 @@ def _get_ledparams_df_from_path(self, channel: int) -> pd.DataFrame: :return: DataFrame with LED parameters. 
:rtype: pd.DataFrame """ - if self.average_images == True: + if self.average_images: file = os.path.join(self.path_simulation, 'analysis', f'channel{channel}', 'all_parameters_avg.h5') else: file = os.path.join(self.path_simulation, 'analysis', f'channel{channel}', 'all_parameters.h5') table = pd.read_hdf(file, key='channel_values') - time = pd.Series(self.image_info_df['Experiment_Time[s]'].astype(int).values, index=self.image_info_df['#ID'], name='Experiment_Time[s]') + time = pd.Series( + self.image_info_df['Experiment_Time[s]'].astype(int).values, + index=self.image_info_df['#ID'], + name='Experiment_Time[s]', + ) table = table.merge(time, left_on='img_id', right_index=True) table.set_index(['Experiment_Time[s]'], inplace=True) self.led_heights = table['height'] @@ -161,294 +344,138 @@ def _get_ledparams_df_from_path(self, channel: int) -> pd.DataFrame: def _get_extco_df_from_path(self): """ - Read all extinction coefficients from the simulation dir and put them in the all_extco_df. + Read all extinction coefficients from the simulation dir and put them in all_extco_df. """ extco_list = [] files_list = glob.glob( - os.path.join(self.path_simulation, 'analysis', 'extinction_coefficients', self.solver, - f'extinction_coefficients*.csv')) + os.path.join( + self.path_simulation, 'analysis', 'extinction_coefficients', + self.solver, 'extinction_coefficients*.csv', + ) + ) for file in files_list: file_df = pd.read_csv(file, skiprows=7) channel = int(file.split('channel_')[1].split('_')[0]) led_array = int(file.split('array_')[1].split('.')[0]) - # For backwards compatibility, check if Experiment_Time[s] is already in the dataframe if '# Experiment_Time[s]' not in file_df.columns: time = self.image_info_df['Experiment_Time[s]'].astype(int) file_df = file_df.merge(time, left_index=True, right_index=True) else: - file_df.rename(columns={'# Experiment_Time[s]': 'Experiment_Time[s]'}, - inplace=True) # TODO: this is just a workaround, do better... 
+ file_df.rename(columns={'# Experiment_Time[s]': 'Experiment_Time[s]'}, inplace=True) file_df.set_index('Experiment_Time[s]', inplace=True) n_layers = len(file_df.columns) - iterables = [[channel], [led_array], [i for i in range(0, n_layers)]] - file_df.columns = pd.MultiIndex.from_product(iterables, names=["Channel", "LED Array", "Layer"]) + file_df.columns = pd.MultiIndex.from_product( + [[channel], [led_array], list(range(n_layers))], + names=["Channel", "LED Array", "Layer"], + ) extco_list.append(file_df) self.all_extco_df = pd.concat(extco_list, axis=1) self.all_extco_df.sort_index(ascending=True, axis=1, inplace=True) - self.all_extco_df = self.all_extco_df[ - ~self.all_extco_df.index.duplicated(keep='first')] # Remove duplicate times + self.all_extco_df = self.all_extco_df[~self.all_extco_df.index.duplicated(keep='first')] def read_led_params(self): - """Read led parameters for all color channels from the simulation path""" + """Read led parameters for all color channels from the simulation path.""" self.ch0_ledparams_df = self._get_ledparams_df_from_path(0) self.ch1_ledparams_df = self._get_ledparams_df_from_path(1) self.ch2_ledparams_df = self._get_ledparams_df_from_path(2) def read_all(self): - """Read led parameters and extionciton coefficients for all color channels from the simulation path""" + """Read led parameters and extinction coefficients for all channels.""" self.read_led_params() self._get_extco_df_from_path() def remove_duplicate_heights(self): - """ Removes duplicate height entries for each LED parameter DataFrame across all colorchannels.""" + """Remove duplicate height entries for each LED parameter DataFrame.""" self.ch0_ledparams_df = self.ch0_ledparams_df.groupby(['Experiment_Time[s]', 'height']).last() self.ch1_ledparams_df = self.ch1_ledparams_df.groupby(['Experiment_Time[s]', 'height']).last() self.ch2_ledparams_df = self.ch2_ledparams_df.groupby(['Experiment_Time[s]', 'height']).last() - def get_extco_at_time(self, channel: int, 
time: int, yaxis='layer', height_reference='center', window=1, smooth='ma') -> pd.DataFrame: - """ - Retrieves a DataFrame containing smoothed extinction coefficients at a specific time. - - This method extracts the extinction coefficients for a specified channel and time, - applies smoothing over time using either a moving average or median based on the specified window size. - It restructures the DataFrame to have layers or heights as indices and LED arrays as columns. - The method first selects the extinction coefficients for the specified channel. It then applies the specified smoothing - operation across a defined window of timesteps. The resulting data is then pivoted to organize extinction coefficients - by LED array and layer or height, depending on the 'yaxis' parameter, providing a structured view suitable for analysis or visualization. - - :param channel: The channel index to extract extinction coefficients for. - :type channel: int - :param time: The time at which to extract extinction coefficients. - :type time: int - :param yaxis: Determines whether the y-axis of the returned DataFrame should represent 'layer' or 'height'. Defaults to 'layer'. - :type yaxis: str, optional - :param height_reference: Specifies the reference point for height calculation. Acceptable values are 'center', 'top', and 'bottom'. - :type height_reference: str - :param window: The window size for the smoothing. A value of 1 returns raw data without any smoothing applied. - Defaults to 1. - :type window: int, optional - :param smooth: The smoothing method to use, either 'ma' for moving average or 'median' for median. Defaults to 'ma'. - :type smooth: str, optional - :return: A pandas DataFrame with the smoothed extinction coefficients. Indices represent either layers or heights, and columns represent LED arrays. 
- :rtype: pd.DataFrame - """ - ch_extco_df = self.all_extco_df.xs(channel, level=0, axis=1) - ma_ch_extco_df = self._apply_moving_average(ch_extco_df, window, smooth) - ma_ch_extco_df = ma_ch_extco_df.loc[time, :] - ma_ch_extco_df = ma_ch_extco_df.reset_index().pivot(columns='LED Array', index='Layer') - ma_ch_extco_df.columns = ma_ch_extco_df.columns.droplevel() - ma_ch_extco_df.index = range(ma_ch_extco_df.shape[0]) - if yaxis == 'layer': - ma_ch_extco_df.index.names = ["Layer"] - elif yaxis == 'height': - if height_reference == 'center': - ma_ch_extco_df.index = self.layer_center_heights - elif height_reference == 'top': - ma_ch_extco_df.index = self.layer_top_heights - elif height_reference == 'bottom': - ma_ch_extco_df.index = self.layer_bottom_heights - else: - raise ValueError("height_reference must be 'center', 'top', or 'bottom'") - ma_ch_extco_df.index.names = ["Height / m"] - return ma_ch_extco_df - - def get_extco_at_led_array(self, channel: int, led_array_id: int, yaxis='layer', height_reference='center', window=1, - smooth='ma') -> pd.DataFrame: - """ - Retrieves a DataFrame containing smoothed extinction coefficients for a specific LED array. - - This method extracts the extinction coefficients for a specified channel and LED array, applies smoothing - over time based on the specified window size, and restructures the DataFrame to have experimental time as - the index and layers (or heights) as columns. - - :param channel: The channel index from which to extract extinction coefficients. - :type channel: int - :param led_array_id: The ID for which to extract extinction coefficients. - :type led_array_id: int - :param yaxis: Determines whether the DataFrame's columns should represent 'layer' or 'height'. Defaults to 'layer'. - :type yaxis: str, optional - :param height_reference: Specifies the reference point for height calculation. Acceptable values are 'center', 'top', and 'bottom'. 
- :type height_reference: str - :param window: The window size for the smoothing. A value of 1 returns raw data without any smoothing applied. - Defaults to 1. - :type window: int, optional - :param smooth: The smoothing method to use, either 'ma' for moving average or 'median' for median. Defaults to 'ma'. - :type smooth: str, optional - :return: A pandas DataFrame with the smoothed extinction coefficients. The index represents the experimental - time, and the columns represent either layers or heights, depending on the 'yaxis' parameter. - :rtype: pd.DataFrame - """ - ch_extco_df = self.all_extco_df.xs(channel, level=0, axis=1).xs(led_array_id, level=0, axis=1) - ma_ch_extco_df = self._apply_moving_average(ch_extco_df, window, smooth) - - if yaxis == 'layer': - ma_ch_extco_df.columns.names = ["Layer"] - elif yaxis == 'height': - if height_reference == 'center': - ma_ch_extco_df.index = self.layer_center_heights - elif height_reference == 'top': - ma_ch_extco_df.index = self.layer_top_heights - elif height_reference == 'bottom': - ma_ch_extco_df.index = self.layer_bottom_heights - else: - raise ValueError("height_reference must be 'center', 'top', or 'bottom'") - return ma_ch_extco_df - - def get_extco_at_layer(self, channel: int, layer: int, window=1, smooth='ma') -> pd.DataFrame: - """ - Retrieves a DataFrame containing smoothed extinction coefficients for a specified layer. - - This method extracts the extinction coefficients for a given channel and layer, then applies smoothing - over the specified window of time. The result is a DataFrame with experimental time as the index and - LED arrays as the columns, providing a time series of extinction coefficients at the specified layer. - - :param channel: The channel index from which to extract extinction coefficients. - :type channel: int - :param layer: The layer number for which to extract extinction coefficients. Layers correspond to - different vertical positions in the experimental setup. 
- :type layer: int - :param window: The window size for the smoothing. A value of 1 returns raw data without any smoothing - applied. Defaults to 1. - :type window: int, optional - :param smooth: The smoothing method to use, either 'ma' for moving average or 'median' for median. - Defaults to 'ma'. - :type smooth: str, optional - :return: A pandas DataFrame with smoothed extinction coefficients. The DataFrame's index represents - experimental time, and its columns represent different LED arrays within the specified layer. - :rtype: pd.DataFrame - """ - ch_extco_df = self.all_extco_df.xs(channel, level=0, axis=1).xs(layer, level=1, axis=1) - return self._apply_moving_average(ch_extco_df, window, smooth) - - def get_extco_at_height(self, channel: int, height: float, window=1, smooth='ma') -> pd.DataFrame: - """ - Retrieves a DataFrame containing smoothed extinction coefficients for a specified height. - - Converts the given height to a layer index and delegates to :meth:`get_extco_at_layer`. - The result is a DataFrame with experimental time as the index and LED arrays as the columns, - providing a time series of extinction coefficients at the specified height. - - :param channel: The channel index from which to extract extinction coefficients. - :type channel: int - :param height: The height in meters for which to extract extinction coefficients. - :type height: float - :param window: The window size for the smoothing. A value of 1 returns raw data without any smoothing - applied. Defaults to 1. - :type window: int, optional - :param smooth: The smoothing method to use, either 'ma' for moving average or 'median' for median. - Defaults to 'ma'. - :type smooth: str, optional - :return: A pandas DataFrame with smoothed extinction coefficients. The DataFrame's index represents - experimental time, and its columns represent different LED arrays within the specified height. 
- :rtype: pd.DataFrame - """ - layer = self.layer_from_height(height) - return self.get_extco_at_layer(channel, layer, window, smooth) + # ------------------------------------------------------------------ + # LED parameter queries + # ------------------------------------------------------------------ - def get_ledparams_at_led_array(self, channel: int, led_array_id: int, param='sum_col_val', yaxis='led_id', window=1, - n_ref=None) -> pd.DataFrame: + def get_ledparams_at_led_array(self, channel, led_array_id, param='sum_col_val', yaxis='led_id', window=1, n_ref=None): """ - Retrieves a DataFrame containing normalized LED parameters for a specific LED array, optionally smoothed over - time. This method selects LED parameter data for a given channel and LED array. It normalizes the data based on - the average of the first `n_ref` entries (if `n_ref` is not False), and applies smoothing over the specified - window of time. The result is a DataFrame with experimental time as the index and LED identifiers (or heights) - as the columns, depending on the `yaxis` parameter. + Retrieves a DataFrame containing normalized LED parameters for a specific LED array. :param channel: The channel index from which to extract LED parameters. :type channel: int - :param led_array_id: The ID of the LED array for which to extract LED parameters. + :param led_array_id: The ID for which to extract LED parameters. :type led_array_id: int - :param param: The specific LED parameter to extract and analyze, such as 'sum_col_val'. Defaults to 'sum_col_val'. + :param param: The specific LED parameter to extract and analyze. Defaults to 'sum_col_val'. :type param: str, optional - :param yaxis: Determines the labeling of the DataFrame's columns, either 'led_id' for LED identifiers or - 'height' for physical heights. Defaults to 'led_id'. + :param yaxis: Column labeling: ``'led_id'`` or ``'height'``. Defaults to ``'led_id'``. 
:type yaxis: str, optional - :param window: The window size for the smoothing. A value of 1 returns raw data without any smoothing - applied. Defaults to 1. + :param window: Smoothing window size. 1 = no smoothing. :type window: int, optional - :param n_ref: The number of initial entries to average for normalization. If set to False, absolute values - are returned without normalization. Defaults to None, which uses self.num_ref_images. + :param n_ref: Number of initial entries for normalisation. ``False`` disables + normalisation. Defaults to ``None`` (uses :attr:`num_ref_images`). :type n_ref: int or bool, optional - :return: A pandas DataFrame with normalized (and optionally smoothed) LED parameters. The index represents - experimental time, and columns represent LED identifiers or heights. + :return: DataFrame; index = time, columns = LED IDs or heights. :rtype: pd.DataFrame """ - if channel == 0: - led_params = self.ch0_ledparams_df - elif channel == 1: - led_params = self.ch1_ledparams_df - elif channel == 2: - led_params = self.ch2_ledparams_df + led_params = {0: self.ch0_ledparams_df, 1: self.ch1_ledparams_df, 2: self.ch2_ledparams_df}[channel] index = 'height' if yaxis == 'height' else 'led_id' led_params = led_params.reset_index().set_index(['Experiment_Time[s]', index]) - led_params.index = led_params.index.set_levels(led_params.index.levels[0] + self.led_params_timedelta, level=0) + led_params.index = led_params.index.set_levels( + led_params.index.levels[0] + self.led_params_timedelta, level=0 + ) ii = led_params[led_params['led_array_id'] == led_array_id][[param]] - if n_ref == False: + if n_ref is False: rel_i = ii else: n_ref = self.num_ref_images if n_ref is None else n_ref - i0 = ii.groupby([index]).agg(lambda g: g.iloc[0:n_ref].mean()) + i0 = ii.groupby([index]).agg(lambda g: g.iloc[:n_ref].mean()) rel_i = ii / i0 - rel_i = rel_i.reset_index().pivot(columns=index, index='Experiment_Time[s]') rel_i.columns = rel_i.columns.droplevel() return 
self._apply_moving_average(rel_i, window) - def get_ledparams_at_led_id(self, channel: int, led_id: int, param='sum_col_val', window=1, - n_ref=None) -> pd.DataFrame: + def get_ledparams_at_led_id(self, channel, led_id, param='sum_col_val', window=1, n_ref=None): """ - Retrieves a DataFrame containing normalized LED parameters for a specific LED ID, optionally smoothed over time. - - This method selects LED parameter data for a given channel and LED ID, normalizes the data based on the average - of the first `n_ref` entries (if `n_ref` is not False), and applies smoothing over the specified window of time. - The result is a DataFrame with experimental time as the index, providing a time series of the specified LED - parameter for the selected LED. + Retrieves a DataFrame containing normalized LED parameters for a specific LED ID. :param channel: The channel index from which to extract LED parameters. :type channel: int :param led_id: The identifier of the LED for which to extract parameters. :type led_id: int - :param param: The specific LED parameter to extract and analyze, such as 'sum_col_val'. Defaults to 'sum_col_val'. + :param param: The specific LED parameter to extract and analyze. Defaults to 'sum_col_val'. :type param: str, optional - :param window: The window size for the smoothing. A value of 1 returns raw data without any smoothing - applied. Defaults to 1. + :param window: Smoothing window size. 1 = no smoothing. :type window: int, optional - :param n_ref: The number of initial entries to average for normalization. If set to False, absolute values - are returned without normalization. Defaults to None, which uses self.num_ref_images. + :param n_ref: Number of initial entries for normalisation. ``False`` disables + normalisation. Defaults to ``None`` (uses :attr:`num_ref_images`). :type n_ref: int or bool, optional - :return: A pandas DataFrame with normalized (and optionally smoothed) LED parameters for the specified LED ID. 
- The DataFrame's index represents experimental time. + :return: DataFrame indexed by time with the specified LED parameter. :rtype: pd.DataFrame """ - if channel == 0: - led_params = self.ch0_ledparams_df - elif channel == 1: - led_params = self.ch1_ledparams_df - elif channel == 2: - led_params = self.ch2_ledparams_df + led_params = {0: self.ch0_ledparams_df, 1: self.ch1_ledparams_df, 2: self.ch2_ledparams_df}[channel] led_params = led_params.reset_index().set_index(['Experiment_Time[s]']) ii = led_params[led_params['led_id'] == led_id][[param]] - if n_ref == False: + if n_ref is False: rel_i = ii else: n_ref = self.num_ref_images if n_ref is None else n_ref - i0 = ii.iloc[0:n_ref].mean() + i0 = ii.iloc[:n_ref].mean() rel_i = ii / i0 return self._apply_moving_average(rel_i, window) - def get_image_name_from_time(self, time: int): + # ------------------------------------------------------------------ + # Image helpers + # ------------------------------------------------------------------ + + def get_image_name_from_time(self, time): """ - Retrieves the image name corresponding to a specific times. + Retrieves the image name corresponding to a specific time. :param time: The time index. :type time: int :return: The name of the image. :rtype: str """ - imagename = self.image_info_df.loc[self.image_info_df['Experiment_Time[s]'] == time]['Name'].values[0] - return imagename + return self.image_info_df.loc[self.image_info_df['Experiment_Time[s]'] == time]['Name'].values[0] - def get_pixel_cordinates_of_LED(self, led_id: int): + def get_pixel_cordinates_of_LED(self, led_id): """ Returns the pixel coordinates of a specified LED. 
@@ -458,41 +485,134 @@ def get_pixel_cordinates_of_LED(self, led_id: int): :rtype: list """ led_info_df = pd.read_csv(self.led_info_path) - pixel_positions = \ - led_info_df.loc[led_info_df.index == led_id][[' pixel position x', ' pixel position y']].values[0] - return pixel_positions + return led_info_df.loc[led_info_df.index == led_id][ + [' pixel position x', ' pixel position y'] + ].values[0] - def get_pixel_values_of_led(self, led_id: int, channel: int, time: int, radius=None): + def get_pixel_values_of_led(self, led_id, channel, time, radius=None): """ - Retrieves a cropped numpy array of pixel values around a specified LED, based on its ID, for a given image channel and time. - - This method calculates the pixel values in a specified radius around an LED's position on an image. It first determines the LED's pixel coordinates, then retrieves the image corresponding to the specified time, and finally extracts a square array of pixel values centered on the LED's location. + Retrieves a cropped numpy array of pixel values around a specified LED. - :param led_id: The identifier for the LED of interest. This ID is used to look up the LED's position. + :param led_id: The identifier for the LED of interest. :type led_id: int - :param channel: The image channel from which to extract pixel values. Different channels may represent different color channels or sensor readings. + :param channel: The image channel from which to extract pixel values. :type channel: int - :param time: The time at which the image was taken. This corresponds to a specific moment in the experimental timeline. + :param time: The time at which the image was taken. :type time: int - :param radius: The radius around the LED's position from which to extract pixel values. If not specified, the search_area_radius from the config file is used. Defaults to None. + :param radius: Pixel radius around the LED. If not specified, uses + ``search_area_radius`` from the config file. 
:type radius: int, optional - :return: A numpy array containing the pixel values in the specified radius around the LED. The array is cropped from the original image, centered on the LED's position. + :return: A numpy array of pixel values around the LED. :rtype: numpy.ndarray - :raises ValueError: If path_images is not available when trying to access image files. + :raises ValueError: If ``path_images`` is not available. """ if not self.path_images: raise ValueError("path_images is required for accessing image files") - - # Use the provided radius or fall back to search_area_radius from config radius = radius if radius is not None else self.search_area_radius - if radius: pixel_positions = self.get_pixel_cordinates_of_LED(led_id) imagename = self.get_image_name_from_time(time) imagefile = os.path.join(self.path_images, imagename) channel_array = read_channel_data_from_img(imagefile, channel) - x = pixel_positions[0] - y = pixel_positions[1] - channel_array_cropped = np.flip(channel_array[x - radius:x + radius, y - radius:y + radius], axis=0) - return channel_array_cropped + x, y = pixel_positions[0], pixel_positions[1] + return np.flip(channel_array[x - radius:x + radius, y - radius:y + radius], axis=0) return None + + +# ────────────────────────────────────────────────────────────────────────────── +# Multi-camera stacked analysis +# ────────────────────────────────────────────────────────────────────────────── + +class StackedSimData(_BaseSimData): + """ + Postprocessing interface for stacked multi-camera extinction coefficient results. + + Reads ``config_stacked.ini`` from *config_path*, discovers the stacked CSV + output files produced by + :class:`~ledsa.analysis.StackedExtinctionCoefficients`, and exposes the same + :class:`_BaseSimData` query API for those results. + + LED parameter data is not available through this class because stacked results + aggregate rays from multiple cameras. 
Use individual per-camera + :class:`SimData` objects for raw LED intensities. + + :ivar config_stacked: Parsed stacked-analysis configuration. + :vartype config_stacked: ConfigDataStacked + """ + + def __init__(self, config_path='.', read_all=True): + """ + :param config_path: Directory containing ``config_stacked.ini``. + Defaults to the current working directory. + :type config_path: str + :param read_all: If ``True``, immediately load all stacked extinction + coefficient CSV files. Defaults to ``True``. + :type read_all: bool + """ + super().__init__() + + prev_dir = os.getcwd() + try: + os.chdir(config_path) + self.config_stacked = ConfigDataStacked() + finally: + os.chdir(prev_dir) + + self.config_path = os.path.abspath(config_path) + self.output_path = os.path.join(self.config_path, str(self.config_stacked.output_path)) + + self.solver = self.config_stacked.solver + self.camera_channels = self.config_stacked.camera_channels + self.num_ref_images = self.config_stacked.num_ref_images + self._set_layer_geometry(self.config_stacked.domain_bounds, self.config_stacked.num_layers) + + if read_all: + self._get_extco_df_from_path() + + def _get_extco_df_from_path(self): + """ + Discover and load all stacked extinction coefficient CSV files. + + Files are expected in:: + + /analysis/extinction_coefficients// + + The virtual LED-array ID is stored as an integer in the + ``all_extco_df`` MultiIndex (level ``"LED Array"``). 
+ """ + extco_list = [] + extco_dir = os.path.join( + self.output_path, 'analysis', 'extinction_coefficients', self.solver, + ) + files = glob.glob(os.path.join(extco_dir, 'extinction_coefficients*.csv')) + + if not files: + print( + f'No stacked extinction coefficient files found in:\n {extco_dir}\n' + 'Run the stacked analysis first with: python -m ledsa -as' + ) + return + + for filepath in files: + fname = os.path.basename(filepath) + channel = int(fname.split('channel_')[1].split('_')[0]) + led_array_id = int(fname.split('_led_array_')[1].removesuffix('.csv')) + + file_df = pd.read_csv(filepath, skiprows=4) + file_df.rename(columns={'# Experiment_Time[s]': 'Experiment_Time[s]'}, inplace=True) + file_df['Experiment_Time[s]'] = file_df['Experiment_Time[s]'].astype(float) + file_df.set_index('Experiment_Time[s]', inplace=True) + + n_layers = len(file_df.columns) + file_df.columns = pd.MultiIndex.from_product( + [[channel], [led_array_id], list(range(n_layers))], + names=["Channel", "LED Array", "Layer"], + ) + extco_list.append(file_df) + + if not extco_list: + return + + self.all_extco_df = pd.concat(extco_list, axis=1) + self.all_extco_df.sort_index(ascending=True, axis=1, inplace=True) + self.all_extco_df = self.all_extco_df[~self.all_extco_df.index.duplicated(keep='first')] \ No newline at end of file From 6ed506051fecc165dc7f1fd2bdb3287077ace421 Mon Sep 17 00:00:00 2001 From: Kristian Date: Thu, 19 Mar 2026 22:15:30 +0100 Subject: [PATCH 6/7] add example for postprocessing of stacked analysis --- examples/postprocessing_stacked.ipynb | 226 ++++++++++++++++++++++++++ 1 file changed, 226 insertions(+) create mode 100644 examples/postprocessing_stacked.ipynb diff --git a/examples/postprocessing_stacked.ipynb b/examples/postprocessing_stacked.ipynb new file mode 100644 index 0000000..759f610 --- /dev/null +++ b/examples/postprocessing_stacked.ipynb @@ -0,0 +1,226 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "collapsed": true, + "pycharm": 
{ + "name": "#%% md\n" + } + }, + "source": [ + "# Dependencies" + ] + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "import os\n", + "import numpy as np\n", + "\n", + "import matplotlib.pyplot as plt\n", + "from matplotlib import rcParams\n", + "from mpl_toolkits.axes_grid1.inset_locator import InsetPosition\n", + "import seaborn as sns\n", + "\n", + "from ledsa.postprocessing.simulation import StackedSimData" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "source": "# Display Config", + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "source": [ + "rcParams['font.family'] = 'serif'\n", + "rcParams['font.size'] = 10\n", + "cm = 1 / 2.54" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "source": "# Load Simulation Data", + "metadata": { + "collapsed": false + } + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "# Input Dir (Simulations)\n", + "path_simulation = '/Path/to/simulation'\n", + "sim = StackedSimData(path_simulation)" + ], + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "# Extinction Coefficients" + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "## Plot extinction coefficients as a function of time for a specific LED array and height" + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "# Parameters\n", + "led_array_id = 1 # LED array to analyze\n", + "window = 3 # Size of moving average window\n", + "color_channels = [0, 1, 2] # RGB channels\n", + "height = 2\n", + "\n", + "# Create figure\n", + "fig, ax = plt.subplots(figsize=(10, 6))\n", + "\n", + "# Plot extinction coefficients for each channel\n", + "for channel in color_channels:\n", + " # Get extinction coefficients at specified height\n", + " extco = sim.get_extco_at_height(channel=channel, 
height=height, window=window)\n", + "\n", + "    # Plot with different colors for each channel\n", + "    colors = ['red', 'green', 'blue']\n", + "    ax.plot(extco.index, extco.iloc[:, led_array_id],\n", + "            color=colors[channel],\n", + "            label=f'Channel {channel}')\n", + "\n", + "ax.set_xlabel('Time / s')\n", + "ax.set_ylabel('Extinction Coefficient / $\\mathrm{m^{-1}}$')\n", + "ax.set_title(f'Extinction Coefficients at Height {height} m, LED Array {led_array_id}')\n", + "ax.grid(True)\n", + "ax.legend()\n", + "plt.tight_layout()\n", + "plt.show()" + ], + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "## Plot extinction coefficients as a function of height and LED array for a specific point in time" + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "# Parameters\n", + "time = 200 # Time point to analyze in seconds\n", + "channel = 0 # RGB channels\n", + "window = 20 # Size of moving average window\n", + "\n", + "# Find closest time point to given time\n", + "closest_time = sim.get_closest_time(time)\n", + "\n", + "extco = sim.get_extco_at_time(channel=channel, time=closest_time, window=window, yaxis='height')\n", + "sns.heatmap(extco.iloc[::-1], cmap='jet', vmax=0.4, cbar_kws={'label': 'Extinction Coefficient / $\\mathrm{m^{-1}}$'})\n", + "plt.tight_layout()" + ], + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "## Plot extinction coefficients as a function of height for a specific LED array and point in time\n" + }, + { + "cell_type": "code", + "source": [ + "# Parameters\n", + "time = 200 # Time point to analyze in seconds\n", + "window = 3 # Size of moving average window\n", + "led_array_id = 2 # LED array to analyze\n", + "color_channels = [0, 1, 2] # RGB channels\n", + "\n", + "# Find closest time point to given time\n", + "closest_time = sim.get_closest_time(time)\n", + "\n", + "# Create figure\n", + "fig, ax = 
plt.subplots(figsize=(10, 6))\n", + "\n", + "# Plot extinction coefficients for each channel\n", + "for channel in color_channels:\n", + "    # Get extinction coefficients at specified time\n", + "    extco = sim.get_extco_at_time(channel=channel, time=closest_time, window=window, yaxis='height')\n", + "\n", + "    # Plot with different colors for each channel\n", + "    colors = ['red', 'green', 'blue']\n", + "    ax.plot(extco.iloc[:, led_array_id], extco.index,\n", + "            color=colors[channel],\n", + "            label=f'Channel {channel}')\n", + "\n", + "ax.set_xlabel('Extinction Coefficient / $\\mathrm{m^{-1}}$')\n", + "ax.set_ylabel('Height / m')\n", + "ax.set_title(f'Extinction Coefficients at Time {closest_time} s, LED Array {led_array_id}')\n", + "ax.grid(True)\n", + "ax.legend()\n", + "plt.tight_layout()\n", + "plt.show()\n" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "code", + "source": "", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "code", + "source": "", + "outputs": [], + "execution_count": null + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} From 8c424ff04df613dacd3edb5e5076af60f1993f92 Mon Sep 17 00:00:00 2001 From: Kristian Date: Thu, 19 Mar 2026 23:38:10 +0100 Subject: [PATCH 7/7] update doc --- doc/analysis.rst | 8 ++++++++ doc/cli_documentation.rst | 23 +++++++++++++++++++++-- doc/config.rst | 8 ++++++++ 3 files changed, 37 insertions(+), 2 deletions(-) diff --git a/doc/analysis.rst b/doc/analysis.rst index ee2abf2..00fd4ac 100644 --- 
a/doc/analysis.rst +++ b/doc/analysis.rst @@ -22,6 +22,14 @@ Analysis :show-inheritance: .. automodule:: ledsa.analysis.ExperimentData + :members: + :undoc-members: + :show-inheritance: + +Stacked Multi-Camera Analysis +------------------------------ + +.. automodule:: ledsa.analysis.StackedExtinctionCoefficients :members: :undoc-members: :show-inheritance: \ No newline at end of file diff --git a/doc/cli_documentation.rst b/doc/cli_documentation.rst index 27e49fd..fe73ae6 100644 --- a/doc/cli_documentation.rst +++ b/doc/cli_documentation.rst @@ -1,4 +1,3 @@ - CLI Documentation ================= @@ -68,6 +67,26 @@ Analysis .. warning:: Color Correction is in a test state and has not yet been sufficiently evaluated. The application may lead to incorrect results. +Stacked Multi-Camera Analysis +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. list-table:: + :widths: 20 50 30 + :header-rows: 1 + + * - Argument + - Description + - Options + * - ``-conf_s [FILENAME]``, ``--config_stacked [FILENAME]`` + - Create a template ``config_stacked.ini`` for multi-camera stacked analysis. The optional argument sets the output filename (default: ``config_stacked.ini``). + - ``--n_simulations N`` sets the number of simulation blocks in the template (default: 2). + * - ``--n_simulations N`` + - Number of ``[simulation_N]`` blocks written into the template ``config_stacked.ini``. Only used together with ``-conf_s``. + - Default: 2 + * - ``-as``, ``--analysis_stacked`` + - Run the stacked multi-camera extinction coefficient calculation using ``config_stacked.ini`` in the current directory. + - -- + Coordinates ^^^^^^^^^^^ @@ -117,4 +136,4 @@ Demo - Optional: Path to setup simulation and image directories. Default to ``'.'`` * - ``--run`` - Run the LEDSA demo in the current working directory. 
- - -- + - -- \ No newline at end of file diff --git a/doc/config.rst b/doc/config.rst index 6529729..3d4f383 100644 --- a/doc/config.rst +++ b/doc/config.rst @@ -17,3 +17,11 @@ ConfigDataAnalysis :members: :undoc-members: :show-inheritance: + +ConfigDataStacked +----------------- + +.. automodule:: ledsa.analysis.ConfigDataStacked + :members: + :undoc-members: + :show-inheritance: \ No newline at end of file