116 changes: 17 additions & 99 deletions flixopt/clustering/base.py
@@ -50,75 +50,6 @@ def _select_dims(da: xr.DataArray, period: Any = None, scenario: Any = None) ->
     return da
 
 
-def combine_slices(
-    slices: dict[tuple, np.ndarray],
-    extra_dims: list[str],
-    dim_coords: dict[str, list],
-    output_dim: str,
-    output_coord: Any,
-    attrs: dict | None = None,
-) -> xr.DataArray:
-    """Combine {(dim_values): 1D_array} dict into a DataArray.
-
-    This utility simplifies the common pattern of iterating over extra dimensions
-    (like period, scenario), processing each slice, and combining results.
-
-    Args:
-        slices: Dict mapping dimension value tuples to 1D numpy arrays.
-            Keys are tuples like ('period1', 'scenario1') matching extra_dims order.
-        extra_dims: Dimension names in order (e.g., ['period', 'scenario']).
-        dim_coords: Dict mapping dimension names to coordinate values.
-        output_dim: Name of the output dimension (typically 'time').
-        output_coord: Coordinate values for output dimension.
-        attrs: Optional DataArray attributes.
-
-    Returns:
-        DataArray with dims [output_dim, *extra_dims].
-
-    Raises:
-        ValueError: If slices is empty.
-        KeyError: If a required key is missing from slices.
-
-    Example:
-        >>> slices = {
-        ...     ('P1', 'base'): np.array([1, 2, 3]),
-        ...     ('P1', 'high'): np.array([4, 5, 6]),
-        ...     ('P2', 'base'): np.array([7, 8, 9]),
-        ...     ('P2', 'high'): np.array([10, 11, 12]),
-        ... }
-        >>> result = combine_slices(
-        ...     slices,
-        ...     extra_dims=['period', 'scenario'],
-        ...     dim_coords={'period': ['P1', 'P2'], 'scenario': ['base', 'high']},
-        ...     output_dim='time',
-        ...     output_coord=[0, 1, 2],
-        ... )
-        >>> result.dims
-        ('time', 'period', 'scenario')
-    """
-    if not slices:
-        raise ValueError('slices cannot be empty')
-
-    first = next(iter(slices.values()))
-    n_output = len(first)
-    shape = [n_output] + [len(dim_coords[d]) for d in extra_dims]
-    data = np.empty(shape, dtype=first.dtype)
-
-    for combo in np.ndindex(*shape[1:]):
-        key = tuple(dim_coords[d][i] for d, i in zip(extra_dims, combo, strict=True))
-        try:
-            data[(slice(None),) + combo] = slices[key]
-        except KeyError:
-            raise KeyError(f'Missing slice for key {key} (extra_dims={extra_dims})') from None
-
-    return xr.DataArray(
-        data,
-        dims=[output_dim] + extra_dims,
-        coords={output_dim: output_coord, **dim_coords},
-        attrs=attrs or {},
-    )
-
-
 def _cluster_occurrences(cr: TsamClusteringResult) -> np.ndarray:
     """Compute cluster occurrences from ClusteringResult."""
     counts = Counter(cr.cluster_assignments)
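For context, `_cluster_occurrences` (whose body is truncated in this diff) builds on `collections.Counter`. A minimal sketch of that counting pattern, where the input values and the sorted-by-cluster-id ordering are assumptions rather than the function's confirmed behavior:

from collections import Counter

import numpy as np

# Hypothetical cluster assignments: one cluster id per original segment
cluster_assignments = [0, 0, 1, 2, 1, 0]

counts = Counter(cluster_assignments)
# One occurrence count per cluster id, in sorted cluster order (assumed)
occurrences = np.array([counts[c] for c in sorted(counts)])
print(occurrences)  # [3 2 1]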
@@ -551,33 +482,22 @@ def _build_property_array(
         name: str | None = None,
     ) -> xr.DataArray:
         """Build a DataArray property, handling both single and multi-dimensional cases."""
-        base_coords = base_coords or {}
-        periods = self._get_dim_values('period')
-        scenarios = self._get_dim_values('scenario')
-
-        # Build list of (dim_name, values) for dimensions that exist
-        extra_dims = []
-        if periods is not None:
-            extra_dims.append(('period', periods))
-        if scenarios is not None:
-            extra_dims.append(('scenario', scenarios))
-
-        # Simple case: no extra dimensions
-        if not extra_dims:
-            return xr.DataArray(get_data(self._results[()]), dims=base_dims, coords=base_coords, name=name)
-
-        # Multi-dimensional: stack data for each combination
-        first_data = get_data(next(iter(self._results.values())))
-        shape = list(first_data.shape) + [len(vals) for _, vals in extra_dims]
-        data = np.empty(shape, dtype=first_data.dtype)  # Preserve dtype
-
-        for combo in np.ndindex(*[len(vals) for _, vals in extra_dims]):
-            key = tuple(extra_dims[i][1][idx] for i, idx in enumerate(combo))
-            data[(...,) + combo] = get_data(self._results[key])
+        slices = []
+        for key, cr in self._results.items():
+            da = xr.DataArray(get_data(cr), dims=base_dims, coords=base_coords or {}, name=name)
+            for dim_name, coord_val in zip(self._dim_names, key, strict=True):
+                da = da.expand_dims({dim_name: [coord_val]})
+            slices.append(da)
 
-        dims = base_dims + [dim_name for dim_name, _ in extra_dims]
-        coords = {**base_coords, **{dim_name: vals for dim_name, vals in extra_dims}}
-        return xr.DataArray(data, dims=dims, coords=coords, name=name)
+        if len(slices) == 1:
+            result = slices[0]
+        else:
+            combined = xr.combine_by_coords(slices)
+            if isinstance(combined, xr.Dataset):
+                result = combined[name]
+            else:
+                result = combined
+        return result.transpose(*base_dims, *self._dim_names)
 
     @staticmethod
     def _key_to_str(key: tuple) -> str:
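The rewritten `_build_property_array` above lets xarray assemble the multi-dimensional result instead of pre-allocating and filling a numpy array by hand, which is also why the `combine_slices` helper could be deleted. A minimal self-contained sketch of the same expand_dims/combine_by_coords pattern, with made-up dimension names, keys, and values:

import numpy as np
import xarray as xr

# Hypothetical per-key 1D results, keyed by (period, scenario)
results = {
    ('P1', 'base'): np.array([1.0, 2.0]),
    ('P1', 'high'): np.array([3.0, 4.0]),
    ('P2', 'base'): np.array([5.0, 6.0]),
    ('P2', 'high'): np.array([7.0, 8.0]),
}
dim_names = ['period', 'scenario']

slices = []
for key, values in results.items():
    da = xr.DataArray(values, dims=['time'], coords={'time': [0, 1]}, name='demo')
    # Promote each key component to a length-1 dimension so the slices align
    for dim_name, coord_val in zip(dim_names, key, strict=True):
        da = da.expand_dims({dim_name: [coord_val]})
    slices.append(da)

# combine_by_coords merges the length-1 slices along their new coordinates;
# named DataArrays can come back wrapped in a Dataset, hence the [name] lookup in the PR
combined = xr.combine_by_coords(slices)
result = combined['demo'] if isinstance(combined, xr.Dataset) else combined
print(result.transpose('time', *dim_names).dims)  # ('time', 'period', 'scenario')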
@@ -628,10 +548,8 @@ def apply(self, data: xr.Dataset) -> AggregationResults:
 
         results = {}
         for key, cr in self._results.items():
-            # Build selector for this key
-            selector = dict(zip(self._dim_names, key, strict=False))
-
-            # Select the slice for this (period, scenario)
+            # Build selector from key based on dim_names
+            selector = {dim_name: key[i] for i, dim_name in enumerate(self._dim_names)}
             data_slice = data.sel(**selector, drop=True) if selector else data
 
             # Drop constant arrays and convert to DataFrame
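One plausible reading of this change (the PR does not state its motivation here): `zip(..., strict=False)` silently truncates when `key` is shorter than `self._dim_names`, whereas the comprehension indexes `key` directly and fails loudly on a mismatch. A small sketch with hypothetical values:

dim_names = ['period', 'scenario']  # hypothetical dims
key = ('P1',)                       # deliberately one value short

# Old pattern: zip(..., strict=False) silently drops the unmatched dim
print(dict(zip(dim_names, key, strict=False)))  # {'period': 'P1'}

# New pattern: direct indexing surfaces the mismatch immediately
try:
    selector = {dim_name: key[i] for i, dim_name in enumerate(dim_names)}
except IndexError as exc:
    print(f'mismatch detected: {exc}')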
4 changes: 3 additions & 1 deletion flixopt/core.py
@@ -4,6 +4,7 @@
 """
 
 import logging
+import warnings
 from itertools import permutations
 from typing import Any, Literal
 
@@ -644,7 +645,8 @@ def drop_constant_arrays(
         axis = var.dims.index(dim)
         data = var.values
         # Use numpy operations directly for speed
-        with np.errstate(invalid='ignore'):  # Ignore NaN warnings
+        with warnings.catch_warnings():
+            warnings.filterwarnings('ignore', category=RuntimeWarning, message='All-NaN slice')
             ptp = np.nanmax(data, axis=axis) - np.nanmin(data, axis=axis)
         if np.all(ptp < atol):
             drop_vars.append(name)
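The switch from `np.errstate` to `warnings.catch_warnings` matters because the "All-NaN slice encountered" message from `np.nanmax`/`np.nanmin` is a Python RuntimeWarning issued via the `warnings` module, not a floating-point error state, so `np.errstate(invalid='ignore')` never suppressed it. A quick standalone demonstration:

import warnings

import numpy as np

data = np.array([[np.nan, np.nan], [1.0, 2.0]])

# np.errstate controls FP error flags (divide, invalid, ...), not Python
# warnings, so the All-NaN RuntimeWarning would still be emitted under it.
with warnings.catch_warnings():
    warnings.filterwarnings('ignore', category=RuntimeWarning, message='All-NaN slice')
    ptp = np.nanmax(data, axis=1) - np.nanmin(data, axis=1)

print(ptp)  # [nan  1.]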