From 3c7bf701ba7d82ecd49749662d44d69a7c963233 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sun, 28 Sep 2025 23:19:15 +0000 Subject: [PATCH 1/5] Initial plan From 4171c2b74c3b5b3db7d126ace2ba11f4879c1009 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sun, 28 Sep 2025 23:31:29 +0000 Subject: [PATCH 2/5] Phase 2 complete: Updated core infrastructure docstrings to numpy format Co-authored-by: lachlangrose <7371904+lachlangrose@users.noreply.github.com> --- LoopStructural/__init__.py | 33 ++++-- LoopStructural/datatypes/_bounding_box.py | 87 ++++++++++++++ LoopStructural/datatypes/_structured_grid.py | 53 +++++++++ LoopStructural/interpolators/__init__.py | 6 +- .../features/fault/_fault_function_feature.py | 101 +++++++++++++--- LoopStructural/utils/observer.py | 110 ++++++++++++++++-- LoopStructural/utils/utils.py | 68 ++++++++--- 7 files changed, 409 insertions(+), 49 deletions(-) diff --git a/LoopStructural/__init__.py b/LoopStructural/__init__.py index 33ffe3330..de3e75522 100644 --- a/LoopStructural/__init__.py +++ b/LoopStructural/__init__.py @@ -23,8 +23,21 @@ loggers = {} @dataclass class LoopStructuralConfig: - """ - Configuration for LoopStructural + """Configuration for LoopStructural package. + + This dataclass holds configuration parameters for the LoopStructural + geological modelling package. + + Parameters + ---------- + nelements : int, optional + The default number of elements to use in interpolation, by default 10_000 + + Examples + -------- + >>> config = LoopStructuralConfig(nelements=50000) + >>> config.nelements + 50000 """ nelements: int = 10_000 @@ -42,15 +55,21 @@ class LoopStructuralConfig: def setLogging(level="info", handler=None): - """ - Set the logging parameters for log file or custom handler + """Set the logging parameters for log file or custom handler. Parameters ---------- - level : str - 'info', 'warning', 'error', 'debug' + level : str, optional + Logging level to set, by default "info" + Valid options: 'info', 'warning', 'error', 'debug' handler : logging.Handler, optional - A logging handler to use instead of the default StreamHandler + A logging handler to use instead of the default StreamHandler, by default None + + Examples + -------- + >>> import LoopStructural + >>> LoopStructural.setLogging('debug') + >>> LoopStructural.setLogging('info', logging.FileHandler('loop.log')) """ import LoopStructural diff --git a/LoopStructural/datatypes/_bounding_box.py b/LoopStructural/datatypes/_bounding_box.py index c4ea65ebe..6a7695af1 100644 --- a/LoopStructural/datatypes/_bounding_box.py +++ b/LoopStructural/datatypes/_bounding_box.py @@ -82,10 +82,24 @@ def __init__( @property def global_origin(self): + """Get the global origin of the bounding box. + + Returns + ------- + np.ndarray + The global origin coordinates + """ return self._global_origin @global_origin.setter def global_origin(self, global_origin): + """Set the global origin of the bounding box. + + Parameters + ---------- + global_origin : array_like + The global origin coordinates + """ if self.dimensions != len(global_origin): logger.warning( f"Global origin has {len(global_origin)} dimensions but bounding box has {self.dimensions}" @@ -94,20 +108,53 @@ def global_origin(self, global_origin): @property def global_maximum(self): + """Get the global maximum coordinates of the bounding box. 
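+
+        Examples
+        --------
+        A minimal sketch (constructor arguments are assumed from the
+        properties above, with the global origin left at zero):
+
+        >>> bb = BoundingBox(origin=[0, 0, 0], maximum=[1, 1, 1])
+        >>> gmax = bb.global_maximum  # local maximum shifted by the global origin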
+ + Returns + ------- + np.ndarray + The global maximum coordinates (local maximum + global origin) + """ return self.maximum + self.global_origin @property def valid(self): + """Check if the bounding box has valid origin and maximum values. + + Returns + ------- + bool + True if both origin and maximum are set, False otherwise + """ return self._origin is not None and self._maximum is not None @property def origin(self) -> np.ndarray: + """Get the origin coordinates of the bounding box. + + Returns + ------- + np.ndarray + Origin coordinates + + Raises + ------ + LoopValueError + If the origin is not set + """ if self._origin is None: raise LoopValueError("Origin is not set") return self._origin @origin.setter def origin(self, origin: np.ndarray): + """Set the origin coordinates of the bounding box. + + Parameters + ---------- + origin : np.ndarray + Origin coordinates + """ if self.dimensions != len(origin): logger.warning( f"Origin has {len(origin)} dimensions but bounding box has {self.dimensions}" @@ -116,24 +163,64 @@ def origin(self, origin: np.ndarray): @property def maximum(self) -> np.ndarray: + """Get the maximum coordinates of the bounding box. + + Returns + ------- + np.ndarray + Maximum coordinates + + Raises + ------ + LoopValueError + If the maximum is not set + """ if self._maximum is None: raise LoopValueError("Maximum is not set") return self._maximum @maximum.setter def maximum(self, maximum: np.ndarray): + """Set the maximum coordinates of the bounding box. + + Parameters + ---------- + maximum : np.ndarray + Maximum coordinates + """ self._maximum = maximum @property def nelements(self): + """Get the total number of elements in the bounding box. + + Returns + ------- + int + Total number of elements (product of nsteps) + """ return self.nsteps.prod() @property def volume(self): + """Calculate the volume of the bounding box. + + Returns + ------- + float + Volume of the bounding box + """ return np.prod(self.maximum - self.origin) @property def bb(self): + """Get a numpy array containing origin and maximum coordinates. + + Returns + ------- + np.ndarray + Array with shape (2, n_dimensions) containing [origin, maximum] + """ return np.array([self.origin, self.maximum]) @nelements.setter diff --git a/LoopStructural/datatypes/_structured_grid.py b/LoopStructural/datatypes/_structured_grid.py index dd3229681..c767a83b0 100644 --- a/LoopStructural/datatypes/_structured_grid.py +++ b/LoopStructural/datatypes/_structured_grid.py @@ -8,6 +8,26 @@ @dataclass class StructuredGrid: + """A structured grid for storing 3D geological data. + + This class represents a regular 3D grid with properties and cell properties + that can be used for geological modelling and visualisation. 
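+
+    Examples
+    --------
+    A minimal sketch using the field defaults listed below (assumes
+    ``numpy`` is imported as ``np``):
+
+    >>> grid = StructuredGrid(nsteps=np.array([5, 5, 5]))
+    >>> grid.maximum
+    array([5, 5, 5])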
+ + Parameters + ---------- + origin : np.ndarray, optional + Origin point of the grid, by default [0, 0, 0] + step_vector : np.ndarray, optional + Step size in each direction, by default [1, 1, 1] + nsteps : np.ndarray, optional + Number of steps in each direction, by default [10, 10, 10] + cell_properties : Dict[str, np.ndarray], optional + Properties defined at cell centres, by default empty dict + properties : Dict[str, np.ndarray], optional + Properties defined at grid nodes, by default empty dict + name : str, optional + Name of the grid, by default "default_grid" + """ origin: np.ndarray = field(default_factory=lambda: np.array([0, 0, 0])) step_vector: np.ndarray = field(default_factory=lambda: np.array([1, 1, 1])) nsteps: np.ndarray = field(default_factory=lambda: np.array([10, 10, 10])) @@ -16,6 +36,13 @@ class StructuredGrid: name: str = "default_grid" def to_dict(self): + """Convert the structured grid to a dictionary representation. + + Returns + ------- + dict + Dictionary containing all grid properties and metadata + """ return { "origin": self.origin, "maximum": self.maximum, @@ -28,9 +55,28 @@ def to_dict(self): @property def maximum(self): + """Calculate the maximum coordinates of the grid. + + Returns + ------- + np.ndarray + Maximum coordinates (origin + nsteps * step_vector) + """ return self.origin + self.nsteps * self.step_vector def vtk(self): + """Convert the structured grid to a PyVista RectilinearGrid. + + Returns + ------- + pv.RectilinearGrid + PyVista grid object with all properties attached + + Raises + ------ + ImportError + If PyVista is not installed + """ try: import pyvista as pv except ImportError: @@ -65,6 +111,13 @@ def plot(self, pyvista_kwargs={}): @property def cell_centres(self): + """Calculate the coordinates of cell centres. + + Returns + ------- + tuple of np.ndarray + X, Y, Z coordinates of all cell centres + """ x = np.linspace( self.origin[0] + self.step_vector[0] * 0.5, self.maximum[0] + self.step_vector[0] * 0.5, diff --git a/LoopStructural/interpolators/__init__.py b/LoopStructural/interpolators/__init__.py index 401a00b84..b0a66d5e1 100644 --- a/LoopStructural/interpolators/__init__.py +++ b/LoopStructural/interpolators/__init__.py @@ -1,6 +1,8 @@ -""" -Interpolators and interpolation supports +"""Interpolators and interpolation supports for LoopStructural. +This module provides various interpolation methods and support structures +for geological modelling, including finite difference, piecewise linear, +and radial basis function interpolators. """ diff --git a/LoopStructural/modelling/features/fault/_fault_function_feature.py b/LoopStructural/modelling/features/fault/_fault_function_feature.py index e70dab1f7..615a4738f 100644 --- a/LoopStructural/modelling/features/fault/_fault_function_feature.py +++ b/LoopStructural/modelling/features/fault/_fault_function_feature.py @@ -6,7 +6,28 @@ class FaultDisplacementFeature(BaseFeature): - """ """ + """Geological feature representing fault displacement. + + This class models the displacement associated with a fault surface using + a fault frame and displacement function. 
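+
+    Examples
+    --------
+    A schematic sketch (``frame`` is assumed to be an existing
+    ``StructuralFrame``; the ``displacement`` callable and its exact
+    signature depend on the fault setup):
+
+    >>> feature = FaultDisplacementFeature(frame, displacement)
+    >>> values = feature.evaluate_value(np.array([[0.0, 0.0, 0.0]]))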
+ + Parameters + ---------- + fault_frame : StructuralFrame + The geometric frame describing the fault surface + displacement : callable + Function defining the fault displacement + name : str, optional + Name of the fault displacement feature, by default "fault_displacement" + model : GeologicalModel, optional + The geological model containing this feature, by default None + faults : list, optional + List of associated faults, by default [] + regions : list, optional + List of regions where this feature applies, by default [] + builder : object, optional + Builder object used to create this feature, by default None + """ def __init__( self, @@ -18,13 +39,24 @@ def __init__( regions=[], builder=None, ): - """ - Geological feature representing the fault displacement + """Initialize the fault displacement feature. Parameters ---------- - fault_frame - geometry of the fault - displacement - function defining fault displacement + fault_frame : StructuralFrame + The geometric frame describing the fault surface + displacement : callable + Function defining the fault displacement + name : str, optional + Name of the fault displacement feature, by default "fault_displacement" + model : GeologicalModel, optional + The geological model containing this feature, by default None + faults : list, optional + List of associated faults, by default [] + regions : list, optional + List of regions where this feature applies, by default [] + builder : object, optional + Builder object used to create this feature, by default None """ BaseFeature.__init__(self, f"{name}_displacement", model, faults, regions, builder) self.fault_frame = StructuralFrame( @@ -34,16 +66,17 @@ def __init__( self.displacement = displacement def evaluate_value(self, location): - """ - Return the value of the fault displacement + """Return the value of the fault displacement at given locations. Parameters ---------- - location + location : np.ndarray + Array of xyz coordinates where displacement should be evaluated Returns ------- - + np.ndarray + Fault displacement values at the given locations """ fault_suface = self.fault_frame.features[0].evaluate_value(location) fault_displacement = self.fault_frame.features[1].evaluate_value(location) @@ -52,16 +85,17 @@ def evaluate_value(self, location): return d def evaluate_gradient(self, location): - """ - get the scaled displacement + """Get the scaled displacement gradient at given locations. Parameters ---------- - location + location : np.ndarray + Array of xyz coordinates where displacement gradient should be evaluated Returns ------- - + np.ndarray + Fault displacement gradient values at the given locations """ fault_suface = self.fault_frame.features[0].evaluate_value(location) fault_displacement = self.fault_frame.features[1].evaluate_value(location) @@ -70,8 +104,22 @@ def evaluate_gradient(self, location): return d def evaluate_on_surface(self, location): - """ - TODO what is this for? + """Evaluate displacement specifically on the fault surface. + + Parameters + ---------- + location : np.ndarray + Array of xyz coordinates on the fault surface + + Returns + ------- + np.ndarray + Fault displacement values on the surface + + Notes + ----- + This method evaluates displacement only considering the fault displacement + and strike components, not the fault surface component. 
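+
+        Examples
+        --------
+        A schematic sketch (``feature`` is assumed to be a built
+        ``FaultDisplacementFeature`` and ``xyz`` an array of points lying
+        on the fault surface):
+
+        >>> surface_disp = feature.evaluate_on_surface(xyz)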
""" fault_displacement = self.fault_frame.features[1].evaluate_value(location) fault_strike = self.fault_frame.features[2].evaluate_value(location) @@ -79,7 +127,30 @@ def evaluate_on_surface(self, location): return d def get_data(self, value_map: Optional[dict] = None): + """Get data associated with this fault displacement feature. + + Parameters + ---------- + value_map : dict, optional + Optional mapping of values, by default None + + Notes + ----- + This method is not yet implemented for fault displacement features. + """ pass def copy(self, name: Optional[str] = None): + """Create a copy of this fault displacement feature. + + Parameters + ---------- + name : str, optional + Name for the copied feature, by default None + + Raises + ------ + NotImplementedError + This method is not yet implemented + """ raise NotImplementedError("Not implemented yet") diff --git a/LoopStructural/utils/observer.py b/LoopStructural/utils/observer.py index ba6e6e27c..92bd7a254 100644 --- a/LoopStructural/utils/observer.py +++ b/LoopStructural/utils/observer.py @@ -11,10 +11,26 @@ @runtime_checkable class Observer(Protocol): - """Objects implementing an *update* method can subscribe.""" + """Protocol for objects that can observe events from Observable objects. + + Classes implementing this protocol must provide an update method that + will be called when observed events occur. + """ def update(self, observable: "Observable", event: str, *args: Any, **kwargs: Any) -> None: - """Receive a notification.""" + """Receive a notification from an observable object. + + Parameters + ---------- + observable : Observable + The observable object that triggered the event + event : str + The name of the event that occurred + *args : Any + Positional arguments associated with the event + **kwargs : Any + Keyword arguments associated with the event + """ Callback = Callable[["Observable", str, Any], None] @@ -22,7 +38,16 @@ def update(self, observable: "Observable", event: str, *args: Any, **kwargs: Any class Disposable: - """A small helper that detaches an observer when disposed.""" + """A helper class that manages detachment of observers. + + This class provides a convenient way to detach observers from observables. + It can be used as a context manager for temporary subscriptions. + + Parameters + ---------- + detach : Callable[[], None] + Function to call when disposing of the observer + """ __slots__ = ("_detach",) @@ -44,7 +69,19 @@ def __exit__(self, exc_type, exc, tb): class Observable(Generic[T]): - """Base‑class that provides Observer pattern plumbing.""" + """Base class that implements the Observer pattern. + + This class provides the infrastructure for managing observers and + notifying them of events. Observers can be attached to specific events + or to all events. + + Attributes + ---------- + _observers : dict[str, weakref.WeakSet[Callback]] + Internal storage mapping event names to sets of callbacks + _any_observers : weakref.WeakSet[Callback] + Set of callbacks that listen to all events + """ #: Internal storage: mapping *event* → WeakSet[Callback] _observers: dict[str, weakref.WeakSet[Callback]] @@ -59,9 +96,19 @@ def __init__(self) -> None: # ‑‑‑ subscription api -------------------------------------------------- def attach(self, listener: Observer | Callback, event: str | None = None) -> Disposable: - """Register *listener* for *event* (all events if *event* is None). - - Returns a :class:`Disposable` so the caller can easily detach again. + """Register a listener for specific event or all events. 
+ + Parameters + ---------- + listener : Observer | Callback + The observer object or callback function to attach + event : str | None, optional + The specific event to listen for. If None, listens to all events, by default None + + Returns + ------- + Disposable + A disposable object that can be used to detach the listener """ callback: Callback = ( listener.update # type: ignore[attr‑defined] @@ -78,7 +125,15 @@ def attach(self, listener: Observer | Callback, event: str | None = None) -> Dis return Disposable(lambda: self.detach(listener, event)) def detach(self, listener: Observer | Callback, event: str | None = None) -> None: - """Unregister a previously attached *listener*.""" + """Unregister a previously attached listener. + + Parameters + ---------- + listener : Observer | Callback + The observer object or callback function to detach + event : str | None, optional + The specific event to stop listening for. If None, detaches from all events, by default None + """ callback: Callback = ( listener.update # type: ignore[attr‑defined] @@ -94,12 +149,27 @@ def detach(self, listener: Observer | Callback, event: str | None = None) -> Non else: self._observers.get(event, weakref.WeakSet()).discard(callback) def __getstate__(self): + """Prepare object state for pickling by removing unpicklable attributes. + + Returns + ------- + dict + Object state dictionary with thread locks and weak references removed + """ state = self.__dict__.copy() state.pop('_lock', None) # RLock cannot be pickled state.pop('_observers', None) # WeakSet cannot be pickled state.pop('_any_observers', None) return state + def __setstate__(self, state): + """Restore object state after unpickling and reinitialize locks and observers. + + Parameters + ---------- + state : dict + The restored object state dictionary + """ self.__dict__.update(state) self._lock = threading.RLock() self._observers = {} @@ -107,7 +177,17 @@ def __setstate__(self, state): self._frozen = 0 # ‑‑‑ notification api -------------------------------------------------- def notify(self: T, event: str, *args: Any, **kwargs: Any) -> None: - """Notify observers that *event* happened.""" + """Notify all observers that an event has occurred. + + Parameters + ---------- + event : str + The name of the event that occurred + *args : Any + Positional arguments to pass to the observers + **kwargs : Any + Keyword arguments to pass to the observers + """ with self._lock: if self._frozen: @@ -134,7 +214,17 @@ def notify(self: T, event: str, *args: Any, **kwargs: Any) -> None: # ‑‑‑ batching ---------------------------------------------------------- @contextmanager def freeze_notifications(self): - """Context manager that batches notifications until exit.""" + """Context manager that batches notifications until exit. + + While in this context, notifications are queued rather than sent + immediately. When the context exits, all queued notifications are + sent in order. + + Yields + ------ + Observable + Self reference for method chaining + """ with self._lock: self._frozen += 1 diff --git a/LoopStructural/utils/utils.py b/LoopStructural/utils/utils.py index 40004e0f2..24bad4f23 100644 --- a/LoopStructural/utils/utils.py +++ b/LoopStructural/utils/utils.py @@ -6,6 +6,23 @@ def strike_symbol(strike): + """Create rotation vectors for geological strike symbols. + + Generate rotation matrix and vectors for displaying geological strike symbols + based on the strike angle. 
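+
+    Examples
+    --------
+    A minimal call; the two returned vectors give the primary and
+    secondary directions of the map symbol:
+
+    >>> rotated, r2 = strike_symbol(45.0)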
+ + Parameters + ---------- + strike : float + Strike angle in degrees + + Returns + ------- + rotated : np.ndarray + Rotated vector for primary strike direction + r2 : np.ndarray + Rotated vector for secondary strike direction + """ R = np.zeros((2, 2)) R[0, 0] = np.cos(np.deg2rad(-strike)) R[0, 1] = -np.sin(np.deg2rad(-strike)) @@ -25,16 +42,27 @@ def strike_symbol(strike): def read_voxet(voxetname, propertyfile): - """ - Read a gocad property file and the geometry information from the .vo file - voxetname - is the path to the voxet file - propertyfile is the path to the binary file + """Read a GOCAD property file and the geometry information from the .vo file. + + Parameters + ---------- + voxetname : str + Path to the voxet (.vo) file + propertyfile : str + Path to the binary property file + Returns - origin numpy array - voxet_extent - is the length of each axis of the voxet - N is the number of steps in the voxet - array is the property values - steps is the size of the step vector for the voxet + ------- + origin : np.ndarray + Origin point of the voxet as numpy array + voxet_extent : np.ndarray + Length of each axis of the voxet + N : np.ndarray + Number of steps in each direction of the voxet + array : np.ndarray + Property values from the binary file + steps : np.ndarray + Size of the step vector for the voxet """ array = np.fromfile(propertyfile, dtype="float32") array = array.astype("f4") # big endian # array = propertyvalues.newbyteorder() From 6b851ca8e702320c3cd38270873843a157959487 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sun, 28 Sep 2025 23:44:56 +0000 Subject: [PATCH 3/5] Phase 3 partial: Updated base geological interpolator with numpy docstrings Co-authored-by: lachlangrose <7371904+lachlangrose@users.noreply.github.com> --- .../_discrete_interpolator.py.backup | 851 ++++++++++++++++++ .../interpolators/_geological_interpolator.py | 184 +++- .../interpolators/_interpolator_builder.py | 12 +- 3 files changed, 1020 insertions(+), 27 deletions(-) create mode 100644 LoopStructural/interpolators/_discrete_interpolator.py.backup diff --git a/LoopStructural/interpolators/_discrete_interpolator.py.backup b/LoopStructural/interpolators/_discrete_interpolator.py.backup new file mode 100644 index 000000000..2f573f1ff --- /dev/null +++ b/LoopStructural/interpolators/_discrete_interpolator.py.backup @@ -0,0 +1,851 @@ +"""Discrete interpolator base for least squares optimization. + +This module contains the base class for discrete interpolators that solve +geological interpolation problems using least squares approximation on +discrete meshes or grids. +""" + +from abc import abstractmethod +from typing import Callable, Optional, Union +import logging + +import numpy as np +from scipy import sparse # import sparse.coo_matrix, sparse.bmat, sparse.eye +from ..interpolators import InterpolatorType + +from ..interpolators import GeologicalInterpolator +from ..utils import getLogger + +logger = getLogger(__name__) + + +class DiscreteInterpolator(GeologicalInterpolator): + """Base class for discrete interpolators using least squares optimization. + + This class provides the foundation for interpolators that discretize the + geological domain using meshes or grids and solve the interpolation problem + using least squares approximation methods. 
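+
+    Notes
+    -----
+    Conceptually, the assembled constraints form a sparse, usually
+    overdetermined system ``Ac = b`` with per-row weights; the solution
+    minimises ``||Ac - b||^2`` and is found either with ``cg`` on the
+    normal equations ``A^T A c = A^T b`` or directly with ``lsmr``
+    (see ``solve_system``).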
+ + Parameters + ---------- + support : object + A discrete mesh or grid with nodes, elements, and geometric properties + data : dict, optional + Dictionary containing constraint data arrays, by default {} + c : array_like, optional + Initial solution vector. If None, zeros are used, by default None + up_to_date : bool, optional + Whether the interpolator is already built, by default False + + Attributes + ---------- + B : list + List of constraint matrices + support : object + The discrete support structure (mesh or grid) + c : np.ndarray + Solution vector + region_function : callable + Function defining the interpolation region + solver : str + Linear solver method to use + constraints : dict + Dictionary of applied constraints + interpolation_weights : dict + Weights for different constraint types + """ + + def __init__(self, support, data={}, c=None, up_to_date=False): + """Initialize the discrete interpolator. + + Sets up the basic data structures and parameters required for discrete + geological interpolation using least squares methods. + + Parameters + ---------- + support : object + A discrete mesh or grid with nodes, elements, and geometric properties + data : dict, optional + Dictionary containing constraint data arrays, by default {} + c : array_like, optional + Initial solution vector. If None or wrong size, zeros are used, by default None + up_to_date : bool, optional + Whether the interpolator is already built, by default False + """ + GeologicalInterpolator.__init__(self, data=data, up_to_date=up_to_date) + self.B = [] + self.support = support + self.dimensions = support.dimension + self.c = ( + np.array(c) + if c is not None and np.array(c).shape[0] == self.support.n_nodes + else np.zeros(self.support.n_nodes) + ) + self.region_function = lambda xyz: np.ones(xyz.shape[0], dtype=bool) + + self.shape = "rectangular" + if self.shape == "square": + self.B = np.zeros(self.dof) + self.c_ = 0 + + self.solver = "cg" + + self.eq_const_C = [] + self.eq_const_row = [] + self.eq_const_col = [] + self.eq_const_d = [] + + self.equal_constraints = {} + self.eq_const_c = 0 + self.ineq_constraints = {} + self.ineq_const_c = 0 + + self.non_linear_constraints = [] + self.constraints = {} + self.interpolation_weights = {} + logger.info("Creating discrete interpolator with {} degrees of freedom".format(self.dof)) + self.type = InterpolatorType.BASE_DISCRETE + + def set_nelements(self, nelements: int) -> int: + """Set the number of elements in the support structure. + + Parameters + ---------- + nelements : int + Target number of elements + + Returns + ------- + int + Actual number of elements set by the support structure + """ + return self.support.set_nelements(nelements) + + @property + def n_elements(self) -> int: + """Number of elements in the interpolator + + Returns + ------- + int + number of elements, positive + """ + return self.support.n_elements + + @property + def dof(self) -> int: + """Number of degrees of freedom for the interpolator + + Returns + ------- + int + number of degrees of freedom, positve + """ + return len(self.support.nodes[self.region]) + + @property + def region(self) -> np.ndarray: + """The active region of the interpolator. 
A boolean + mask for all elements that are interpolated + + Returns + ------- + np.ndarray + + """ + + return self.region_function(self.support.nodes).astype(bool) + + @property + def region_map(self): + region_map = np.zeros(self.support.n_nodes).astype(int) + region_map[self.region] = np.array(range(0, len(region_map[self.region]))) + return region_map + + def set_region(self, region=None): + """ + Set the region of the support the interpolator is working on + + Parameters + ---------- + region - function(position) + return true when in region, false when out + + Returns + ------- + + """ + # evaluate the region function on the support to determine + # which nodes are inside update region map and degrees of freedom + # self.region_function = region + logger.info( + "Cannot use region at the moment. Interpolation now uses region and has {} degrees of freedom".format( + self.dof + ) + ) + + def set_interpolation_weights(self, weights): + """ + Set the interpolation weights dictionary + + Parameters + ---------- + weights - dictionary + Entry of new weights to assign to self.interpolation_weights + + Returns + ------- + + """ + for key in weights: + self.up_to_date = False + self.interpolation_weights[key] = weights[key] + + def _pre_solve(self): + """ + Pre solve function to be run before solving the interpolation + """ + self.c = np.zeros(self.support.n_nodes) + self.c[:] = np.nan + return True + + def _post_solve(self): + """Post solve function(s) to be run after the solver has been called""" + self.clear_constraints() + return True + + def clear_constraints(self): + """ + Clear the constraints from the interpolator, this makes sure we are not storing + the constraints after the solver has been run + """ + self.constraints = {} + self.ineq_constraints = {} + self.equal_constraints = {} + + def reset(self): + """ + Reset the interpolation constraints + + """ + self.constraints = {} + self.c_ = 0 + self.regularisation_scale = np.ones(self.dof) + logger.info("Resetting interpolation constraints") + + def add_constraints_to_least_squares(self, A, B, idc, w=1.0, name="undefined"): + """ + Adds constraints to the least squares system. Automatically works + out the row + index given the shape of the input arrays + + Parameters + ---------- + A : numpy array / list + RxC numpy array of constraints where C is number of columns,R rows + B : numpy array /list + B values array length R + idc : numpy array/list + RxC column index + + Returns + ------- + list of constraint ids + + """ + A = np.array(A) + B = np.array(B) + idc = np.array(idc) + n_rows = A.shape[0] + # logger.debug('Adding constraints to interpolator: {} {} {}'.format(A.shape[0])) + # print(A.shape,B.shape,idc.shape) + if A.shape != idc.shape: + logger.error(f"Cannot add constraints: A and indexes have different shape : {name}") + return + + if len(A.shape) > 2: + n_rows = A.shape[0] * A.shape[1] + if isinstance(w, np.ndarray): + w = np.tile(w, (A.shape[1])) + A = A.reshape((A.shape[0] * A.shape[1], A.shape[2])) + idc = idc.reshape((idc.shape[0] * idc.shape[1], idc.shape[2])) + B = B.reshape((A.shape[0])) + # w = w.reshape((A.shape[0])) + # normalise by rows of A + # Should this be done? It should make the solution more stable + length = np.linalg.norm(A, axis=1) + # length[length>0] = 1. 
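+        # Normalise each constraint row by its L2 norm so that constraints of
+        # different magnitude contribute comparably to the least squares
+        # system; zero-length rows are skipped to avoid division by zero.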
+ B[length > 0] /= length[length > 0] + # going to assume if any are nan they are all nan + mask = np.any(np.isnan(A), axis=1) + A[mask, :] = 0 + A[length > 0, :] /= length[length > 0, None] + if isinstance(w, (float, int)): + w = np.ones(A.shape[0]) * w + if not isinstance(w, np.ndarray): + raise BaseException("w must be a numpy array") + + if w.shape[0] != A.shape[0]: + raise BaseException("Weight array does not match number of constraints") + if np.any(np.isnan(idc)) or np.any(np.isnan(A)) or np.any(np.isnan(B)): + logger.warning("Constraints contain nan not adding constraints: {}".format(name)) + # return + rows = np.arange(0, n_rows).astype(int) + base_name = name + while name in self.constraints: + count = 0 + if "_" in name: + count = int(name.split("_")[1]) + 1 + name = base_name + "_{}".format(count) + + rows = np.tile(rows, (A.shape[-1], 1)).T + self.constraints[name] = { + 'matrix': sparse.coo_matrix( + (A.flatten(), (rows.flatten(), idc.flatten())), shape=(n_rows, self.dof) + ).tocsc(), + 'b': B.flatten(), + 'w': w, + } + + @abstractmethod + def add_gradient_orthogonal_constraints( + self, points: np.ndarray, vectors: np.ndarray, w: float = 1.0 + ): + pass + + def calculate_residual_for_constraints(self): + """Calculates Ax-B for all constraints added to the interpolator + This could be a proxy to identify which constraints are controlling the model + + Returns + ------- + np.ndarray + vector of Ax-B + """ + residuals = {} + for constraint_name, constraint in self.constraints: + residuals[constraint_name] = ( + np.einsum("ij,ij->i", constraint["A"], self.c[constraint["idc"].astype(int)]) + - constraint["B"].flatten() + ) + return residuals + + def add_inequality_constraints_to_matrix( + self, A: np.ndarray, bounds: np.ndarray, idc: np.ndarray, name: str = "undefined" + ): + """Adds constraints for a matrix where the linear function + l < Ax > u constrains the objective function + + + Parameters + ---------- + A : numpy array + matrix of coefficients + bounds : numpy array + n*3 lower, upper, 1 + idc : numpy array + index of constraints in the matrix + Returns + ------- + + """ + # map from mesh node index to region node index + gi = np.zeros(self.support.n_nodes, dtype=int) + gi[:] = -1 + gi[self.region] = np.arange(0, self.dof, dtype=int) + idc = gi[idc] + rows = np.arange(0, idc.shape[0]) + rows = np.tile(rows, (A.shape[-1], 1)).T + + self.ineq_constraints[name] = { + 'matrix': sparse.coo_matrix( + (A.flatten(), (rows.flatten(), idc.flatten())), shape=(rows.shape[0], self.dof) + ).tocsc(), + "bounds": bounds, + } + + def add_value_inequality_constraints(self, w: float = 1.0): + points = self.get_inequality_value_constraints() + # check that we have added some points + if points.shape[0] > 0: + vertices, a, element, inside = self.support.get_element_for_location(points) + rows = np.arange(0, points[inside, :].shape[0], dtype=int) + rows = np.tile(rows, (a.shape[-1], 1)).T + a = a[inside] + cols = self.support.elements[element[inside]] + self.add_inequality_constraints_to_matrix(a, points[:, 3:5], cols, 'inequality_value') + + def add_inequality_pairs_constraints( + self, + w: float = 1.0, + upper_bound=np.finfo(float).eps, + lower_bound=-np.inf, + pairs: Optional[list] = None, + ): + + points = self.get_inequality_pairs_constraints() + if points.shape[0] > 0: + # assemble a list of pairs in the model + # this will make pairs even across stratigraphic boundaries + # TODO add option to only add stratigraphic pairs + if not pairs: + pairs = {} + k = 0 + for i in 
np.unique(points[:, self.support.dimension]): + for j in np.unique(points[:, self.support.dimension]): + if i == j: + continue + if tuple(sorted([i, j])) not in pairs: + pairs[tuple(sorted([i, j]))] = k + k += 1 + pairs = list(pairs.keys()) + for pair in pairs: + upper_points = points[points[:, self.support.dimension] == pair[0]] + lower_points = points[points[:, self.support.dimension] == pair[1]] + + upper_interpolation = self.support.get_element_for_location(upper_points) + lower_interpolation = self.support.get_element_for_location(lower_points) + if (~upper_interpolation[3]).sum() > 0: + logger.warning( + f'Upper points not in mesh {upper_points[~upper_interpolation[3]]}' + ) + if (~lower_interpolation[3]).sum() > 0: + logger.warning( + f'Lower points not in mesh {lower_points[~lower_interpolation[3]]}' + ) + ij = np.array( + [ + *np.meshgrid( + np.arange(0, int(upper_interpolation[3].sum()), dtype=int), + np.arange(0, int(lower_interpolation[3].sum()), dtype=int), + ) + ], + dtype=int, + ) + + ij = ij.reshape(2, -1).T + rows = np.arange(0, ij.shape[0], dtype=int) + rows = np.tile(rows, (upper_interpolation[1].shape[-1], 1)).T + rows = np.hstack([rows, rows]) + a = upper_interpolation[1][upper_interpolation[3]][ij[:, 0]] + a = np.hstack([a, -lower_interpolation[1][lower_interpolation[3]][ij[:, 1]]]) + cols = np.hstack( + [ + self.support.elements[ + upper_interpolation[2][upper_interpolation[3]][ij[:, 0]] + ], + self.support.elements[ + lower_interpolation[2][lower_interpolation[3]][ij[:, 1]] + ], + ] + ) + + bounds = np.zeros((ij.shape[0], 2)) + bounds[:, 0] = lower_bound + bounds[:, 1] = upper_bound + + self.add_inequality_constraints_to_matrix( + a, bounds, cols, f'inequality_pairs_{pair[0]}_{pair[1]}' + ) + + def add_inequality_feature( + self, + feature: Callable[[np.ndarray], np.ndarray], + lower: bool = True, + mask: Optional[np.ndarray] = None, + ): + """Add an inequality constraint to the interpolator using an existing feature. + This will make the interpolator greater than or less than the exising feature. + Evaluate the feature at the interpolation nodes. + Can provide a boolean mask to restrict to only some parts + + Parameters + ---------- + feature : BaseFeature + the feature that will be used to constraint the interpolator + lower : bool, optional + lower or upper constraint, by default True + mask : np.ndarray, optional + restrict the nodes to evaluate on, by default None + """ + # add inequality value for the nodes of the mesh + # flag lower determines whether the feature is a lower bound or upper bound + # mask is just a boolean array determining which nodes to apply it to + + value = feature(self.support.nodes) + if mask is None: + mask = np.ones(value.shape[0], dtype=bool) + l = np.zeros(value.shape[0]) - np.inf + u = np.zeros(value.shape[0]) + np.inf + mask = np.logical_and(mask, ~np.isnan(value)) + if lower: + l[mask] = value[mask] + if not lower: + u[mask] = value[mask] + + self.add_inequality_constraints_to_matrix( + np.ones((value.shape[0], 1)), + l, + u, + np.arange(0, self.dof, dtype=int), + ) + + def add_equality_constraints(self, node_idx, values, name="undefined"): + """ + Adds hard constraints to the least squares system. For now this just + sets + the node values to be fixed using a lagrangian. 
+
+        Parameters
+        ----------
+        node_idx : numpy array/list
+            Integer array of node indexes
+        values : numpy array/list
+            Array of node values
+        """
+        # map from mesh node index to region node index
+        gi = np.zeros(self.support.n_nodes)
+        gi[:] = -1
+        gi[self.region] = np.arange(0, self.dof)
+        idc = gi[node_idx]
+        outside = ~(idc == -1)
+
+        self.equal_constraints[name] = {
+            "A": np.ones(idc[outside].shape[0]),
+            "B": values[outside],
+            "col": idc[outside],
+            # "w": w,
+            "row": np.arange(self.eq_const_c, self.eq_const_c + idc[outside].shape[0]),
+        }
+        self.eq_const_c += idc[outside].shape[0]
+
+    def add_tangent_constraints(self, w=1.0):
+        r"""Add tangent constraints to the interpolation system.
+
+        Adds constraints of the form :math:`f(X) \cdot T = 0` where T is the
+        tangent vector at location X.
+
+        Parameters
+        ----------
+        w : float, optional
+            Weight for the tangent constraints, by default 1.0
+
+        Notes
+        -----
+        Tangent constraints ensure that the scalar field gradient is perpendicular
+        to specified tangent directions, typically used for structural measurements.
+        """
+        points = self.get_tangent_constraints()
+        if points.shape[0] > 1:
+            self.add_gradient_orthogonal_constraints(points[:, :3], points[:, 3:6], w)
+
+    def build_matrix(self):
+        """Assemble constraints into interpolation matrix.
+
+        Adds equality constraints using Lagrange multipliers if necessary.
+
+        Returns
+        -------
+        tuple of (sparse.csr_matrix, np.ndarray)
+            Interpolation matrix A and constraint vector B
+        """
+        mats = []
+        bs = []
+        for c in self.constraints.values():
+            if len(c["w"]) == 0:
+                continue
+            mats.append(c['matrix'].multiply(c['w'][:, None]))
+            bs.append(c['b'] * c['w'])
+        A = sparse.vstack(mats)
+        logger.info(f"Interpolation matrix is {A.shape[0]} x {A.shape[1]}")
+
+        B = np.hstack(bs)
+        return A, B
+
+    def add_equality_block(self, A, B):
+        """Add equality constraints to the interpolation system.
+
+        Solves constrained least squares using Lagrange multipliers::
+
+            | ATA  CT | |c|   |ATB|
+            | C    0  | |y| = | d |
+
+        Parameters
+        ----------
+        A : sparse matrix
+            The interpolation matrix
+        B : np.ndarray
+            The constraint vector
+
+        Notes
+        -----
+        Where A is the interpolation matrix, C is the equality constraint matrix,
+        ATB are the interpolation constraints to be honoured in a least squares sense,
+        d are the equality constraints, c are the node values and y are the
+        Lagrange multipliers.
+ """ + if len(self.equal_constraints) > 0: + ATA = A.T.dot(A) + ATB = A.T.dot(B) + logger.info(f"Equality block is {self.eq_const_c} x {self.dof}") + # solving constrained least squares using + # | ATA CT | |c| = b + # | C 0 | |y| d + # where A is the interpoaltion matrix + # C is the equality constraint matrix + # b is the interpolation constraints to be honoured + # in a least squares sense + # and d are the equality constraints + # c are the node values and y are the + # lagrange multipliers# + a = [] + rows = [] + cols = [] + b = [] + for c in self.equal_constraints.values(): + b.extend((c["B"]).tolist()) + aa = c["A"].flatten() + mask = aa == 0 + a.extend(aa[~mask].tolist()) + rows.extend(c["row"].flatten()[~mask].tolist()) + cols.extend(c["col"].flatten()[~mask].tolist()) + + C = sparse.coo_matrix( + (np.array(a), (np.array(rows), cols)), + shape=(self.eq_const_c, self.dof), + dtype=float, + ).tocsr() + + d = np.array(b) + ATA = sparse.bmat([[ATA, C.T], [C, None]]) + ATB = np.hstack([ATB, d]) + + return ATA, ATB + + def build_inequality_matrix(self): + mats = [] + bounds = [] + for c in self.ineq_constraints.values(): + mats.append(c['matrix']) + bounds.append(c['bounds']) + if len(mats) == 0: + return sparse.csr_matrix((0, self.dof), dtype=float), np.zeros((0, 3)) + Q = sparse.vstack(mats) + bounds = np.vstack(bounds) + return Q, bounds + + def solve_system( + self, + solver: Optional[Union[Callable[[sparse.csr_matrix, np.ndarray], np.ndarray], str]] = None, + tol: Optional[float] = None, + solver_kwargs: dict = {}, + ) -> bool: + """ + Main entry point to run the solver and update the node value + attribute for the + discreteinterpolator class + + Parameters + ---------- + solver : string/callable + solver 'cg' conjugate gradient, 'lsmr' or callable function + solver_kwargs + kwargs for solver check scipy documentation for more information + + Returns + ------- + bool + True if the interpolation is run + + """ + if not self._pre_solve(): + raise ValueError("Pre solve failed") + + A, b = self.build_matrix() + Q, bounds = self.build_inequality_matrix() + if callable(solver): + logger.warning('Using custom solver') + self.c = solver(A.tocsr(), b) + self.up_to_date = True + elif isinstance(solver, str) or solver is None: + if solver not in ['cg', 'lsmr', 'admm']: + logger.warning( + f'Unknown solver {solver} using cg. \n Available solvers are cg and lsmr or a custom solver as a callable function' + ) + solver = 'cg' + if solver == 'cg': + logger.info("Solving using cg") + if 'atol' not in solver_kwargs or 'rtol' not in solver_kwargs: + if tol is not None: + solver_kwargs['atol'] = tol + + logger.info(f"Solver kwargs: {solver_kwargs}") + + res = sparse.linalg.cg(A.T @ A, A.T @ b, **solver_kwargs) + if res[1] > 0: + logger.warning( + f'CG reached iteration limit ({res[1]})and did not converge, check input data. 
Setting solution to last iteration' + ) + self.c = res[0] + self.up_to_date = True + + elif solver == 'lsmr': + logger.info("Solving using lsmr") + if 'atol' not in solver_kwargs: + if tol is not None: + solver_kwargs['atol'] = tol + if 'btol' not in solver_kwargs: + if tol is not None: + solver_kwargs['btol'] = tol + logger.info(f"Solver kwargs: {solver_kwargs}") + res = sparse.linalg.lsmr(A, b, **solver_kwargs) + if res[1] == 1 or res[1] == 4 or res[1] == 2 or res[1] == 5: + self.c = res[0] + elif res[1] == 0: + logger.warning("Solution to least squares problem is all zeros, check input data") + elif res[1] == 3 or res[1] == 6: + logger.warning("COND(A) seems to be greater than CONLIM, check input data") + # self.c = res[0] + elif res[1] == 7: + logger.warning( + "LSMR reached iteration limit and did not converge, check input data. Setting solution to last iteration" + ) + self.c = res[0] + self.up_to_date = True + + elif solver == 'admm': + logger.info("Solving using admm") + + if 'x0' in solver_kwargs: + x0 = solver_kwargs['x0'](self.support) + else: + x0 = np.zeros(A.shape[1]) + solver_kwargs.pop('x0', None) + if Q is None: + logger.warning("No inequality constraints, using lsmr") + return self.solve_system('lsmr', solver_kwargs) + + try: + from loopsolver import admm_solve + + try: + linsys_solver = solver_kwargs.pop('linsys_solver', 'lsmr') + res = admm_solve( + A, + b, + Q, + bounds, + x0=x0, + admm_weight=solver_kwargs.pop('admm_weight', 0.01), + nmajor=solver_kwargs.pop('nmajor', 200), + linsys_solver_kwargs=solver_kwargs, + linsys_solver=linsys_solver, + ) + self.c = res + self.up_to_date = True + except ValueError as e: + logger.error(f"ADMM solver failed: {e}") + self.up_to_date = False + except ImportError: + logger.warning( + "Cannot import admm solver. Please install loopsolver or use lsmr or cg" + ) + self.up_to_date = False + else: + logger.error(f"Unknown solver {solver}") + self.up_to_date = False + # self._post_solve() + return self.up_to_date + + def update(self) -> bool: + """ + Check if the solver is up to date, if not rerun interpolation using + the previously used solver. If the interpolation has not been run + before it will + return False + + Returns + ------- + bool + + """ + if self.solver is None: + logging.debug("Cannot rerun interpolator") + return False + if not self.up_to_date: + self.setup_interpolator() + self.up_to_date = self.solve_system(self.solver) + return self.up_to_date + + def evaluate_value(self, locations: np.ndarray) -> np.ndarray: + """Evaluate the value of the interpolator at given locations. + + Parameters + ---------- + locations : np.ndarray + Array of xyz coordinates where the interpolator should be evaluated + + Returns + ------- + np.ndarray + Values of the interpolator at the evaluation points + """ + self.update() + evaluation_points = np.array(locations) + evaluated = np.zeros(evaluation_points.shape[0]) + mask = np.any(evaluation_points == np.nan, axis=1) + + if evaluation_points[~mask, :].shape[0] > 0: + evaluated[~mask] = self.support.evaluate_value(evaluation_points[~mask], self.c) + return evaluated + + def evaluate_gradient(self, locations: np.ndarray) -> np.ndarray: + """Evaluate the gradient of the scalar field at the given locations. 
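+
+        Examples
+        --------
+        A schematic sketch (``interp`` is assumed to be a solved concrete
+        subclass instance):
+
+        >>> grads = interp.evaluate_gradient(np.array([[0.5, 0.5, 0.5]]))
+        >>> grads.shape
+        (1, 3)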
+ + Parameters + ---------- + locations : np.ndarray + Array of xyz coordinates where gradient should be evaluated + + Returns + ------- + np.ndarray + Gradient vectors at the evaluation points with shape (n_points, 3) + """ + self.update() + if locations.shape[0] > 0: + return self.support.evaluate_gradient(locations, self.c) + return np.zeros((0, 3)) + + def to_dict(self): + """Convert the interpolator to a dictionary representation. + + Returns + ------- + dict + Dictionary containing the interpolator state and configuration + """ + return { + "type": self.type.name, + "support": self.support.to_dict(), + "c": self.c, + **super().to_dict(), + # 'region_function':self.region_function, + } + + def vtk(self): + """Get a VTK representation of the interpolator. + + Returns + ------- + object + VTK object containing the support structure with solution values + """ + return self.support.vtk({'c': self.c}) + diff --git a/LoopStructural/interpolators/_geological_interpolator.py b/LoopStructural/interpolators/_geological_interpolator.py index a71e1a7ba..5b6953f8e 100644 --- a/LoopStructural/interpolators/_geological_interpolator.py +++ b/LoopStructural/interpolators/_geological_interpolator.py @@ -1,5 +1,7 @@ -""" -Base geological interpolator +"""Base geological interpolator for LoopStructural. + +This module contains the abstract base class for all geological interpolators +used in LoopStructural geological modelling framework. """ from abc import ABCMeta, abstractmethod @@ -14,20 +16,57 @@ class GeologicalInterpolator(metaclass=ABCMeta): - """ + """Abstract base class for geological interpolators. + + This class defines the interface for all geological interpolators in + LoopStructural, providing methods for setting constraints and evaluating + the interpolated scalar field. + Attributes ---------- data : dict - a dictionary with np.arrays for gradient, value, normal, tangent data + Dictionary containing numpy arrays for gradient, value, normal, and tangent data + n_g : int + Number of gradient constraints + n_i : int + Number of interface/value constraints + n_n : int + Number of normal constraints + n_t : int + Number of tangent constraints + type : InterpolatorType + The type of interpolator + up_to_date : bool + Whether the interpolator needs to be rebuilt + constraints : list + List of applied constraints + valid : bool + Whether the interpolator is in a valid state + dimensions : int + Number of spatial dimensions (default 3) + support : object + The support structure used by the interpolator """ @abstractmethod def __init__(self, data={}, up_to_date=False): - """ - This class is the base class for a geological interpolator and contains - all of the main interface functions. Any class that is inheriting from - this should be callable by using any of these functions. This will - enable interpolators to be interchanged. + """Initialize the geological interpolator. + + This method sets up the basic data structures and parameters required + for geological interpolation. + + Parameters + ---------- + data : dict, optional + Dictionary containing constraint data arrays, by default {} + up_to_date : bool, optional + Whether the interpolator is already built and up to date, by default False + + Notes + ----- + This is an abstract method that must be implemented by subclasses. + All subclasses should call this parent constructor to ensure proper + initialization of the base data structures. 
""" self._data = {} self.data = data # None @@ -48,25 +87,75 @@ def __init__(self, data={}, up_to_date=False): @abstractmethod def set_nelements(self, nelements: int) -> int: + """Set the number of elements for the interpolation support. + + Parameters + ---------- + nelements : int + Target number of elements + + Returns + ------- + int + Actual number of elements set + + Notes + ----- + This is an abstract method that must be implemented by subclasses. + The actual number of elements may differ from the requested number + depending on the interpolator's constraints. + """ pass @property @abstractmethod def n_elements(self) -> int: + """Get the number of elements in the interpolation support. + + Returns + ------- + int + Number of elements + + Notes + ----- + This is an abstract property that must be implemented by subclasses. + """ pass @property def data(self): + """Get the constraint data dictionary. + + Returns + ------- + dict + Dictionary containing constraint data arrays + """ return self._data @data.setter def data(self, data): + """Set the constraint data dictionary. + + Parameters + ---------- + data : dict or None + Dictionary containing constraint data arrays. If None, an empty dict is used. + """ if data is None: data = {} for k, v in data.items(): self._data[k] = np.array(v) def __str__(self): + """Return string representation of the interpolator. + + Returns + ------- + str + String describing the interpolator type and constraint counts + """ name = f"{self.type} \n" name += f"{self.n_g} gradient points\n" name += f"{self.n_i} interface points\n" @@ -76,19 +165,41 @@ def __str__(self): return name def check_array(self, array: np.ndarray): + """Validate and convert input to numpy array. + + Parameters + ---------- + array : array_like + Input array to validate and convert + + Returns + ------- + np.ndarray + Validated numpy array + + Raises + ------ + LoopTypeError + If the array cannot be converted to a numpy array + """ try: return np.array(array) except Exception as e: raise LoopTypeError(str(e)) def to_json(self): - """ - Returns a json representation of the geological interpolator + """Return a JSON representation of the geological interpolator. Returns ------- - json : dict - json representation of the geological interpolator + dict + Dictionary containing the interpolator's state and configuration + suitable for JSON serialization + + Notes + ----- + This method packages the essential state of the interpolator including + its type, constraints, data, and build status for serialization. """ json = {} json["type"] = self.type @@ -102,20 +213,39 @@ def to_json(self): @abstractmethod def set_region(self, **kwargs): + """Set the interpolation region. + + Parameters + ---------- + **kwargs : dict + Region parameters specific to the interpolator implementation + + Notes + ----- + This is an abstract method that must be implemented by subclasses. + The specific parameters depend on the interpolator type. + """ pass def set_value_constraints(self, points: np.ndarray): - """ + """Set value constraints for the interpolation. Parameters ---------- points : np.ndarray - array containing the value constraints usually 4-5 columns. - X,Y,Z,val,weight + Array containing the value constraints with shape (n_points, 4-5). + Columns should be [X, Y, Z, value, weight]. If weight is not provided, + a weight of 1.0 is assumed for all points. 
- Returns - ------- + Raises + ------ + ValueError + If points array doesn't have the minimum required columns + Notes + ----- + Value constraints specify known scalar field values at specific locations. + These are typically used for interface points or measured data values. """ points = self.check_array(points) if points.shape[1] == self.dimensions + 1: @@ -127,17 +257,25 @@ def set_value_constraints(self, points: np.ndarray): self.up_to_date = False def set_gradient_constraints(self, points: np.ndarray): - """ + """Set gradient constraints for the interpolation. Parameters ---------- points : np.ndarray - array containing the value constraints usually 7-8 columns. - X,Y,Z,gx,gy,gz,weight + Array containing gradient constraints with shape (n_points, 7-8). + Columns should be [X, Y, Z, gx, gy, gz, weight]. If weight is not + provided, a weight of 1.0 is assumed for all points. - Returns - ------- + Raises + ------ + ValueError + If points array doesn't have the minimum required columns + Notes + ----- + Gradient constraints specify the direction and magnitude of the scalar + field gradient at specific locations. These are typically derived from + structural measurements like bedding or foliation orientations. """ if points.shape[1] == self.dimensions * 2: points = np.hstack([points, np.ones((points.shape[0], 1))]) diff --git a/LoopStructural/interpolators/_interpolator_builder.py b/LoopStructural/interpolators/_interpolator_builder.py index b67946627..573ba8361 100644 --- a/LoopStructural/interpolators/_interpolator_builder.py +++ b/LoopStructural/interpolators/_interpolator_builder.py @@ -62,17 +62,21 @@ def add_value_constraints(self, value_constraints: np.ndarray) -> 'InterpolatorB return self def add_gradient_constraints(self, gradient_constraints: np.ndarray) -> 'InterpolatorBuilder': - """Add gradient constraints to the interpolator - Where g1 and g2 are two vectors that are orthogonal to the gradient - $'(X)\cdot g1 = 0$ and $'(X)\cdot g2 = 0$ + """Add gradient constraints to the interpolator. 
+ + Where g1 and g2 are two vectors that are orthogonal to the gradient: + f'(X) · g1 = 0 and f'(X) · g2 = 0 Parameters ---------- gradient_constraints : np.ndarray - x,y,z,gradient_x,gradient_y,gradient_z of the constraints + Array with columns [x, y, z, gradient_x, gradient_y, gradient_z] of the constraints Returns ------- + bool + True if constraints were added successfully + """ InterpolatorBuilder reference to the builder """ From 0b2ac4988fd629d27b010a2ecdb5bb847b030c54 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sun, 28 Sep 2025 23:49:44 +0000 Subject: [PATCH 4/5] Phase 4 partial: Enhanced modeling framework with numpy docstrings Co-authored-by: lachlangrose <7371904+lachlangrose@users.noreply.github.com> --- .../interpolators/_interpolator_builder.py | 3 - .../modelling/core/stratigraphic_column.py | 251 +++++++++++++++--- .../modelling/features/_geological_feature.py | 77 ++++-- 3 files changed, 257 insertions(+), 74 deletions(-) diff --git a/LoopStructural/interpolators/_interpolator_builder.py b/LoopStructural/interpolators/_interpolator_builder.py index 573ba8361..7cf0fc6e0 100644 --- a/LoopStructural/interpolators/_interpolator_builder.py +++ b/LoopStructural/interpolators/_interpolator_builder.py @@ -77,9 +77,6 @@ def add_gradient_constraints(self, gradient_constraints: np.ndarray) -> 'Interpo bool True if constraints were added successfully """ - InterpolatorBuilder - reference to the builder - """ if self.interpolator: self.interpolator.set_gradient_constraints(gradient_constraints) diff --git a/LoopStructural/modelling/core/stratigraphic_column.py b/LoopStructural/modelling/core/stratigraphic_column.py index 8a69702d1..481a07b81 100644 --- a/LoopStructural/modelling/core/stratigraphic_column.py +++ b/LoopStructural/modelling/core/stratigraphic_column.py @@ -5,8 +5,14 @@ logger = getLogger(__name__) logger.info("Imported LoopStructural Stratigraphic Column module") class UnconformityType(enum.Enum): - """ - An enumeration for different types of unconformities in a stratigraphic column. + """Enumeration for different types of unconformities in a stratigraphic column. + + Attributes + ---------- + ERODE : str + Erosional unconformity type + ONLAP : str + Onlap unconformity type """ ERODE = 'erode' @@ -14,8 +20,14 @@ class UnconformityType(enum.Enum): class StratigraphicColumnElementType(enum.Enum): - """ - An enumeration for different types of elements in a stratigraphic column. + """Enumeration for different types of elements in a stratigraphic column. + + Attributes + ---------- + UNIT : str + Stratigraphic unit element type + UNCONFORMITY : str + Unconformity element type """ UNIT = 'unit' @@ -23,14 +35,29 @@ class StratigraphicColumnElementType(enum.Enum): class StratigraphicColumnElement: - """ - A class to represent an element in a stratigraphic column, which can be a unit or a topological object - for example unconformity. + """Base class for elements in a stratigraphic column. + + This class represents a generic element that can be either a stratigraphic unit + or a topological object such as an unconformity. + + Parameters + ---------- + uuid : str, optional + Unique identifier for the element. If None, a UUID4 is generated, by default None + + Attributes + ---------- + uuid : str + Unique identifier for the element """ def __init__(self, uuid=None): - """ - Initializes the StratigraphicColumnElement with a uuid. + """Initialize the StratigraphicColumnElement with a UUID. 
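+
+        Examples
+        --------
+        A generated identifier is assigned when none is supplied:
+
+        >>> element = StratigraphicColumnElement()
+        >>> element.uuid is not None
+        True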
+ + Parameters + ---------- + uuid : str, optional + Unique identifier for the element. If None, a UUID4 is generated, by default None """ if uuid is None: import uuid as uuid_module @@ -40,13 +67,53 @@ def __init__(self, uuid=None): class StratigraphicUnit(StratigraphicColumnElement, Observable['StratigraphicUnit']): - """ - A class to represent a stratigraphic unit. + """A class representing a stratigraphic unit in the geological column. + + This class combines the basic element functionality with observable capabilities + to track changes to unit properties such as thickness and ID. + + Parameters + ---------- + uuid : str, optional + Unique identifier for the unit, by default None + name : str, optional + Human-readable name for the unit, by default None + colour : array_like, optional + RGB colour values for visualization. If None, random colour is assigned, by default None + thickness : float, optional + Thickness of the unit, by default None + data : dict, optional + Additional data associated with the unit, by default None + id : int, optional + Numeric identifier for the unit, by default None + + Attributes + ---------- + element_type : StratigraphicColumnElementType + Type of the element (always UNIT for this class) + min_value : float + Minimum scalar field value for the unit + max_value : float + Maximum scalar field value for the unit """ def __init__(self, *, uuid=None, name=None, colour=None, thickness=None, data=None, id=None): - """ - Initializes the StratigraphicUnit with a name and an optional description. + """Initialize the StratigraphicUnit. + + Parameters + ---------- + uuid : str, optional + Unique identifier for the unit, by default None + name : str, optional + Human-readable name for the unit, by default None + colour : array_like, optional + RGB colour values for visualization. If None, random colour is assigned, by default None + thickness : float, optional + Thickness of the unit, by default None + data : dict, optional + Additional data associated with the unit, by default None + id : int, optional + Numeric identifier for the unit, by default None """ StratigraphicColumnElement.__init__(self, uuid) Observable.__init__(self) @@ -62,39 +129,78 @@ def __init__(self, *, uuid=None, name=None, colour=None, thickness=None, data=No self.max_value = None # Maximum scalar field value for the unit @property def id(self): + """Get the numeric identifier of the unit. + + Returns + ------- + int + The numeric identifier + """ return self._id + @property def thickness(self): + """Get the thickness of the unit. + + Returns + ------- + float + The thickness value + """ return self._thickness + @thickness.setter def thickness(self, value): - """ - Sets the thickness of the unit. + """Set the thickness of the unit and notify observers. + + Parameters + ---------- + value : float + The new thickness value """ self._thickness = value self.notify('unit/thickness_updated', unit=self) + @id.setter def id(self, value): - """ - Sets the ID of the unit. + """Set the ID of the unit and notify observers. + + Parameters + ---------- + value : int + The new ID value """ if not isinstance(value, int): raise TypeError("ID must be an integer") self._id = value self.notify('unit/id_updated', unit=self) def min(self): - """ - Returns the minimum value of the unit. + """Return the minimum scalar field value of the unit. 
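+
+        A minimal sketch of the expected behaviour (assuming a freshly created
+        unit whose ``min_value`` has not yet been assigned):
+
+        >>> unit = StratigraphicUnit(name='unit_a', thickness=10.0)
+        >>> unit.min()
+        0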
+ + Returns + ------- + float + Minimum value, or 0 if not set """ return self.min_value if self.min_value is not None else 0 + def max(self): - """ - Returns the maximum value of the unit. + """Return the maximum scalar field value of the unit. + + Returns + ------- + float + Maximum value, or infinity if not set """ return self.max_value if self.max_value is not None else np.inf + def to_dict(self): - """ - Converts the stratigraphic unit to a dictionary representation. + """Convert the stratigraphic unit to a dictionary representation. + + Returns + ------- + dict + Dictionary containing unit properties: name, colour, thickness, uuid, and id """ colour = self.colour if isinstance(colour, np.ndarray): @@ -103,8 +209,22 @@ def to_dict(self): @classmethod def from_dict(cls, data): - """ - Creates a StratigraphicUnit from a dictionary representation. + """Create a StratigraphicUnit from a dictionary representation. + + Parameters + ---------- + data : dict + Dictionary containing unit properties + + Returns + ------- + StratigraphicUnit + New stratigraphic unit instance + + Raises + ------ + TypeError + If data is not a dictionary """ if not isinstance(data, dict): raise TypeError("Data must be a dictionary") @@ -115,8 +235,12 @@ def from_dict(cls, data): return cls(uuid=uuid, name=name, colour=colour, thickness=thickness, id=data.get("id", None)) def __str__(self): - """ - Returns a string representation of the stratigraphic unit. + """Return a string representation of the stratigraphic unit. + + Returns + ------- + str + String representation showing name, colour, and thickness """ return ( f"StratigraphicUnit(name={self.name}, colour={self.colour}, thickness={self.thickness})" @@ -175,49 +299,92 @@ def from_dict(cls, data): uuid = data.get("uuid", None) return cls(uuid=uuid, name=name, unconformity_type=unconformity_type) class StratigraphicGroup: - """ - A class to represent a group of stratigraphic units. - This class is not fully implemented and serves as a placeholder for future development. + """A class representing a group of stratigraphic units. + + This class serves as a container for related stratigraphic units and provides + a placeholder for future group-level functionality. + + Parameters + ---------- + name : str, optional + Name of the stratigraphic group, by default None + units : list, optional + List of stratigraphic units in the group, by default None + + Attributes + ---------- + name : str + Name of the group + units : list + List of units contained in the group """ def __init__(self, name=None, units=None): - """ - Initializes the StratigraphicGroup with a name and an optional list of units. + """Initialize the StratigraphicGroup. + + Parameters + ---------- + name : str, optional + Name of the stratigraphic group, by default None + units : list, optional + List of stratigraphic units in the group, by default None """ self.name = name self.units = units if units is not None else [] class StratigraphicColumn(Observable['StratigraphicColumn']): - """ - A class to represent a stratigraphic column, which is a vertical section of the Earth's crust - showing the sequence of rock layers and their relationships. + """A class representing a stratigraphic column. + + The stratigraphic column represents a vertical section of the Earth's crust + showing the sequence of rock layers and their relationships, including + unconformities and basement rock. 
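+
+    A minimal usage sketch (assuming ``add_unit`` accepts the same keyword
+    arguments used by ``add_basement`` below; any return value is ignored):
+
+    >>> column = StratigraphicColumn()
+    >>> _ = column.add_unit(name='unit_a', colour='red', thickness=10.0)
+    >>> len(column.order) > 0
+    True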
+ + Attributes + ---------- + order : list + Ordered list of stratigraphic elements (units and unconformities) + group_mapping : dict + Mapping of groups to their constituent units """ def __init__(self): - """ - Initializes the StratigraphicColumn with a name and a list of layers. - """ + """Initialize the StratigraphicColumn with basement and base unconformity.""" super().__init__() self.order = [] self.add_basement() self.group_mapping = {} def get_new_id(self): - """ - Generates a new unique ID for a stratigraphic unit. + """Generate a new unique ID for a stratigraphic unit. + + Returns + ------- + int + New unique ID for the next unit """ if not self.order: return 0 return max([u.id for u in self.order if isinstance(u, StratigraphicUnit)], default=0) + 1 + def add_basement(self): + """Add basement unit and base unconformity to the stratigraphic column. + + This method adds the fundamental basement unit with infinite thickness + and an erosional unconformity at the base of the column. + """ self.add_unit(name='Basement', colour='grey', thickness=np.inf) self.add_unconformity( name='Base Unconformity', unconformity_type=UnconformityType.ERODE ) + def clear(self, basement=True): - """ - Clears the stratigraphic column, removing all elements. + """Clear the stratigraphic column, removing all elements. + + Parameters + ---------- + basement : bool, optional + Whether to add basement after clearing, by default True """ if basement: self.add_basement() diff --git a/LoopStructural/modelling/features/_geological_feature.py b/LoopStructural/modelling/features/_geological_feature.py index 82ca40b98..a18f36cc1 100644 --- a/LoopStructural/modelling/features/_geological_feature.py +++ b/LoopStructural/modelling/features/_geological_feature.py @@ -1,5 +1,7 @@ -""" -Geological features +"""Geological features for LoopStructural modeling. + +This module contains classes for representing geometrical elements in geological +models such as foliations, fault planes, and fold rotation angles. """ from LoopStructural.utils.maths import regular_tetraherdron_for_points, gradient_from_tetrahedron @@ -16,24 +18,37 @@ class GeologicalFeature(BaseFeature): - """ - Geological feature is class that is used to represent a geometrical element in a geological - model. For example foliations, fault planes, fold rotation angles etc. + """A geological feature representing a geometrical element in a geological model. + + This class provides the foundation for representing various geological structures + such as foliations, fault planes, fold rotation angles, and other geometrical + elements within a geological model. + + Parameters + ---------- + name : str + Unique name for the geological feature + builder : GeologicalFeatureBuilder + Builder object used to construct the feature + regions : list, optional + List of boolean functions defining where the feature is active, by default [] + faults : list, optional + List of FaultSegments that affect this feature, by default [] + interpolator : GeologicalInterpolator, optional + Interpolator for the feature. 
If None, uses builder's interpolator, by default None + model : GeologicalModel, optional + The geological model containing this feature, by default None Attributes ---------- - name : string - should be a unique name for the geological feature - support : a ScalarField - holds the property values for the feature and links to the - support geometry - data : list - list containing geological data - region : list - list of boolean functions defining whether the feature is - active - faults : list - list of FaultSegments that affect this feature + name : str + Unique name for the geological feature + builder : GeologicalFeatureBuilder + Builder object used to construct the feature + interpolator : GeologicalInterpolator + Interpolator used to compute field values + type : FeatureType + Type of the feature (set to INTERPOLATED) """ def __init__( @@ -45,18 +60,22 @@ def __init__( interpolator=None, model=None, ): - """Default constructor for geological feature + """Initialize the geological feature. Parameters ---------- - name: str - interpolator : GeologicalInterpolator + name : str + Unique name for the geological feature builder : GeologicalFeatureBuilder - region : list - faults : list - model : GeologicalModel - - + Builder object used to construct the feature + regions : list, optional + List of boolean functions defining where the feature is active, by default [] + faults : list, optional + List of FaultSegments that affect this feature, by default [] + interpolator : GeologicalInterpolator, optional + Interpolator for the feature. If None, uses builder's interpolator, by default None + model : GeologicalModel, optional + The geological model containing this feature, by default None """ BaseFeature.__init__(self, name, model, faults, regions, builder) self.name = name @@ -65,13 +84,13 @@ def __init__( self.type = FeatureType.INTERPOLATED def to_json(self): - """ - Returns a json representation of the geological feature + """Return a JSON representation of the geological feature. Returns ------- - json : dict - json representation of the geological feature + dict + Dictionary containing the feature's state suitable for JSON serialization, + including interpolator configuration """ json = super().to_json() print(self.name, json) From f99921755978a7bc4d4f5319e7d2a5c78a4c518c Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Mon, 29 Sep 2025 09:03:57 +0000 Subject: [PATCH 5/5] Remove accidentally committed backup file _discrete_interpolator.py.backup Co-authored-by: lachlangrose <7371904+lachlangrose@users.noreply.github.com> --- .../_discrete_interpolator.py.backup | 851 ------------------ 1 file changed, 851 deletions(-) delete mode 100644 LoopStructural/interpolators/_discrete_interpolator.py.backup diff --git a/LoopStructural/interpolators/_discrete_interpolator.py.backup b/LoopStructural/interpolators/_discrete_interpolator.py.backup deleted file mode 100644 index 2f573f1ff..000000000 --- a/LoopStructural/interpolators/_discrete_interpolator.py.backup +++ /dev/null @@ -1,851 +0,0 @@ -"""Discrete interpolator base for least squares optimization. - -This module contains the base class for discrete interpolators that solve -geological interpolation problems using least squares approximation on -discrete meshes or grids. 
-""" - -from abc import abstractmethod -from typing import Callable, Optional, Union -import logging - -import numpy as np -from scipy import sparse # import sparse.coo_matrix, sparse.bmat, sparse.eye -from ..interpolators import InterpolatorType - -from ..interpolators import GeologicalInterpolator -from ..utils import getLogger - -logger = getLogger(__name__) - - -class DiscreteInterpolator(GeologicalInterpolator): - """Base class for discrete interpolators using least squares optimization. - - This class provides the foundation for interpolators that discretize the - geological domain using meshes or grids and solve the interpolation problem - using least squares approximation methods. - - Parameters - ---------- - support : object - A discrete mesh or grid with nodes, elements, and geometric properties - data : dict, optional - Dictionary containing constraint data arrays, by default {} - c : array_like, optional - Initial solution vector. If None, zeros are used, by default None - up_to_date : bool, optional - Whether the interpolator is already built, by default False - - Attributes - ---------- - B : list - List of constraint matrices - support : object - The discrete support structure (mesh or grid) - c : np.ndarray - Solution vector - region_function : callable - Function defining the interpolation region - solver : str - Linear solver method to use - constraints : dict - Dictionary of applied constraints - interpolation_weights : dict - Weights for different constraint types - """ - - def __init__(self, support, data={}, c=None, up_to_date=False): - """Initialize the discrete interpolator. - - Sets up the basic data structures and parameters required for discrete - geological interpolation using least squares methods. - - Parameters - ---------- - support : object - A discrete mesh or grid with nodes, elements, and geometric properties - data : dict, optional - Dictionary containing constraint data arrays, by default {} - c : array_like, optional - Initial solution vector. If None or wrong size, zeros are used, by default None - up_to_date : bool, optional - Whether the interpolator is already built, by default False - """ - GeologicalInterpolator.__init__(self, data=data, up_to_date=up_to_date) - self.B = [] - self.support = support - self.dimensions = support.dimension - self.c = ( - np.array(c) - if c is not None and np.array(c).shape[0] == self.support.n_nodes - else np.zeros(self.support.n_nodes) - ) - self.region_function = lambda xyz: np.ones(xyz.shape[0], dtype=bool) - - self.shape = "rectangular" - if self.shape == "square": - self.B = np.zeros(self.dof) - self.c_ = 0 - - self.solver = "cg" - - self.eq_const_C = [] - self.eq_const_row = [] - self.eq_const_col = [] - self.eq_const_d = [] - - self.equal_constraints = {} - self.eq_const_c = 0 - self.ineq_constraints = {} - self.ineq_const_c = 0 - - self.non_linear_constraints = [] - self.constraints = {} - self.interpolation_weights = {} - logger.info("Creating discrete interpolator with {} degrees of freedom".format(self.dof)) - self.type = InterpolatorType.BASE_DISCRETE - - def set_nelements(self, nelements: int) -> int: - """Set the number of elements in the support structure. 
- - Parameters - ---------- - nelements : int - Target number of elements - - Returns - ------- - int - Actual number of elements set by the support structure - """ - return self.support.set_nelements(nelements) - - @property - def n_elements(self) -> int: - """Number of elements in the interpolator - - Returns - ------- - int - number of elements, positive - """ - return self.support.n_elements - - @property - def dof(self) -> int: - """Number of degrees of freedom for the interpolator - - Returns - ------- - int - number of degrees of freedom, positve - """ - return len(self.support.nodes[self.region]) - - @property - def region(self) -> np.ndarray: - """The active region of the interpolator. A boolean - mask for all elements that are interpolated - - Returns - ------- - np.ndarray - - """ - - return self.region_function(self.support.nodes).astype(bool) - - @property - def region_map(self): - region_map = np.zeros(self.support.n_nodes).astype(int) - region_map[self.region] = np.array(range(0, len(region_map[self.region]))) - return region_map - - def set_region(self, region=None): - """ - Set the region of the support the interpolator is working on - - Parameters - ---------- - region - function(position) - return true when in region, false when out - - Returns - ------- - - """ - # evaluate the region function on the support to determine - # which nodes are inside update region map and degrees of freedom - # self.region_function = region - logger.info( - "Cannot use region at the moment. Interpolation now uses region and has {} degrees of freedom".format( - self.dof - ) - ) - - def set_interpolation_weights(self, weights): - """ - Set the interpolation weights dictionary - - Parameters - ---------- - weights - dictionary - Entry of new weights to assign to self.interpolation_weights - - Returns - ------- - - """ - for key in weights: - self.up_to_date = False - self.interpolation_weights[key] = weights[key] - - def _pre_solve(self): - """ - Pre solve function to be run before solving the interpolation - """ - self.c = np.zeros(self.support.n_nodes) - self.c[:] = np.nan - return True - - def _post_solve(self): - """Post solve function(s) to be run after the solver has been called""" - self.clear_constraints() - return True - - def clear_constraints(self): - """ - Clear the constraints from the interpolator, this makes sure we are not storing - the constraints after the solver has been run - """ - self.constraints = {} - self.ineq_constraints = {} - self.equal_constraints = {} - - def reset(self): - """ - Reset the interpolation constraints - - """ - self.constraints = {} - self.c_ = 0 - self.regularisation_scale = np.ones(self.dof) - logger.info("Resetting interpolation constraints") - - def add_constraints_to_least_squares(self, A, B, idc, w=1.0, name="undefined"): - """ - Adds constraints to the least squares system. 
Automatically works - out the row - index given the shape of the input arrays - - Parameters - ---------- - A : numpy array / list - RxC numpy array of constraints where C is number of columns,R rows - B : numpy array /list - B values array length R - idc : numpy array/list - RxC column index - - Returns - ------- - list of constraint ids - - """ - A = np.array(A) - B = np.array(B) - idc = np.array(idc) - n_rows = A.shape[0] - # logger.debug('Adding constraints to interpolator: {} {} {}'.format(A.shape[0])) - # print(A.shape,B.shape,idc.shape) - if A.shape != idc.shape: - logger.error(f"Cannot add constraints: A and indexes have different shape : {name}") - return - - if len(A.shape) > 2: - n_rows = A.shape[0] * A.shape[1] - if isinstance(w, np.ndarray): - w = np.tile(w, (A.shape[1])) - A = A.reshape((A.shape[0] * A.shape[1], A.shape[2])) - idc = idc.reshape((idc.shape[0] * idc.shape[1], idc.shape[2])) - B = B.reshape((A.shape[0])) - # w = w.reshape((A.shape[0])) - # normalise by rows of A - # Should this be done? It should make the solution more stable - length = np.linalg.norm(A, axis=1) - # length[length>0] = 1. - B[length > 0] /= length[length > 0] - # going to assume if any are nan they are all nan - mask = np.any(np.isnan(A), axis=1) - A[mask, :] = 0 - A[length > 0, :] /= length[length > 0, None] - if isinstance(w, (float, int)): - w = np.ones(A.shape[0]) * w - if not isinstance(w, np.ndarray): - raise BaseException("w must be a numpy array") - - if w.shape[0] != A.shape[0]: - raise BaseException("Weight array does not match number of constraints") - if np.any(np.isnan(idc)) or np.any(np.isnan(A)) or np.any(np.isnan(B)): - logger.warning("Constraints contain nan not adding constraints: {}".format(name)) - # return - rows = np.arange(0, n_rows).astype(int) - base_name = name - while name in self.constraints: - count = 0 - if "_" in name: - count = int(name.split("_")[1]) + 1 - name = base_name + "_{}".format(count) - - rows = np.tile(rows, (A.shape[-1], 1)).T - self.constraints[name] = { - 'matrix': sparse.coo_matrix( - (A.flatten(), (rows.flatten(), idc.flatten())), shape=(n_rows, self.dof) - ).tocsc(), - 'b': B.flatten(), - 'w': w, - } - - @abstractmethod - def add_gradient_orthogonal_constraints( - self, points: np.ndarray, vectors: np.ndarray, w: float = 1.0 - ): - pass - - def calculate_residual_for_constraints(self): - """Calculates Ax-B for all constraints added to the interpolator - This could be a proxy to identify which constraints are controlling the model - - Returns - ------- - np.ndarray - vector of Ax-B - """ - residuals = {} - for constraint_name, constraint in self.constraints: - residuals[constraint_name] = ( - np.einsum("ij,ij->i", constraint["A"], self.c[constraint["idc"].astype(int)]) - - constraint["B"].flatten() - ) - return residuals - - def add_inequality_constraints_to_matrix( - self, A: np.ndarray, bounds: np.ndarray, idc: np.ndarray, name: str = "undefined" - ): - """Adds constraints for a matrix where the linear function - l < Ax > u constrains the objective function - - - Parameters - ---------- - A : numpy array - matrix of coefficients - bounds : numpy array - n*3 lower, upper, 1 - idc : numpy array - index of constraints in the matrix - Returns - ------- - - """ - # map from mesh node index to region node index - gi = np.zeros(self.support.n_nodes, dtype=int) - gi[:] = -1 - gi[self.region] = np.arange(0, self.dof, dtype=int) - idc = gi[idc] - rows = np.arange(0, idc.shape[0]) - rows = np.tile(rows, (A.shape[-1], 1)).T - - self.ineq_constraints[name] = 
{ - 'matrix': sparse.coo_matrix( - (A.flatten(), (rows.flatten(), idc.flatten())), shape=(rows.shape[0], self.dof) - ).tocsc(), - "bounds": bounds, - } - - def add_value_inequality_constraints(self, w: float = 1.0): - points = self.get_inequality_value_constraints() - # check that we have added some points - if points.shape[0] > 0: - vertices, a, element, inside = self.support.get_element_for_location(points) - rows = np.arange(0, points[inside, :].shape[0], dtype=int) - rows = np.tile(rows, (a.shape[-1], 1)).T - a = a[inside] - cols = self.support.elements[element[inside]] - self.add_inequality_constraints_to_matrix(a, points[:, 3:5], cols, 'inequality_value') - - def add_inequality_pairs_constraints( - self, - w: float = 1.0, - upper_bound=np.finfo(float).eps, - lower_bound=-np.inf, - pairs: Optional[list] = None, - ): - - points = self.get_inequality_pairs_constraints() - if points.shape[0] > 0: - # assemble a list of pairs in the model - # this will make pairs even across stratigraphic boundaries - # TODO add option to only add stratigraphic pairs - if not pairs: - pairs = {} - k = 0 - for i in np.unique(points[:, self.support.dimension]): - for j in np.unique(points[:, self.support.dimension]): - if i == j: - continue - if tuple(sorted([i, j])) not in pairs: - pairs[tuple(sorted([i, j]))] = k - k += 1 - pairs = list(pairs.keys()) - for pair in pairs: - upper_points = points[points[:, self.support.dimension] == pair[0]] - lower_points = points[points[:, self.support.dimension] == pair[1]] - - upper_interpolation = self.support.get_element_for_location(upper_points) - lower_interpolation = self.support.get_element_for_location(lower_points) - if (~upper_interpolation[3]).sum() > 0: - logger.warning( - f'Upper points not in mesh {upper_points[~upper_interpolation[3]]}' - ) - if (~lower_interpolation[3]).sum() > 0: - logger.warning( - f'Lower points not in mesh {lower_points[~lower_interpolation[3]]}' - ) - ij = np.array( - [ - *np.meshgrid( - np.arange(0, int(upper_interpolation[3].sum()), dtype=int), - np.arange(0, int(lower_interpolation[3].sum()), dtype=int), - ) - ], - dtype=int, - ) - - ij = ij.reshape(2, -1).T - rows = np.arange(0, ij.shape[0], dtype=int) - rows = np.tile(rows, (upper_interpolation[1].shape[-1], 1)).T - rows = np.hstack([rows, rows]) - a = upper_interpolation[1][upper_interpolation[3]][ij[:, 0]] - a = np.hstack([a, -lower_interpolation[1][lower_interpolation[3]][ij[:, 1]]]) - cols = np.hstack( - [ - self.support.elements[ - upper_interpolation[2][upper_interpolation[3]][ij[:, 0]] - ], - self.support.elements[ - lower_interpolation[2][lower_interpolation[3]][ij[:, 1]] - ], - ] - ) - - bounds = np.zeros((ij.shape[0], 2)) - bounds[:, 0] = lower_bound - bounds[:, 1] = upper_bound - - self.add_inequality_constraints_to_matrix( - a, bounds, cols, f'inequality_pairs_{pair[0]}_{pair[1]}' - ) - - def add_inequality_feature( - self, - feature: Callable[[np.ndarray], np.ndarray], - lower: bool = True, - mask: Optional[np.ndarray] = None, - ): - """Add an inequality constraint to the interpolator using an existing feature. - This will make the interpolator greater than or less than the exising feature. - Evaluate the feature at the interpolation nodes. 
- Can provide a boolean mask to restrict to only some parts - - Parameters - ---------- - feature : BaseFeature - the feature that will be used to constraint the interpolator - lower : bool, optional - lower or upper constraint, by default True - mask : np.ndarray, optional - restrict the nodes to evaluate on, by default None - """ - # add inequality value for the nodes of the mesh - # flag lower determines whether the feature is a lower bound or upper bound - # mask is just a boolean array determining which nodes to apply it to - - value = feature(self.support.nodes) - if mask is None: - mask = np.ones(value.shape[0], dtype=bool) - l = np.zeros(value.shape[0]) - np.inf - u = np.zeros(value.shape[0]) + np.inf - mask = np.logical_and(mask, ~np.isnan(value)) - if lower: - l[mask] = value[mask] - if not lower: - u[mask] = value[mask] - - self.add_inequality_constraints_to_matrix( - np.ones((value.shape[0], 1)), - l, - u, - np.arange(0, self.dof, dtype=int), - ) - - def add_equality_constraints(self, node_idx, values, name="undefined"): - """ - Adds hard constraints to the least squares system. For now this just - sets - the node values to be fixed using a lagrangian. - - Parameters - ---------- - node_idx : numpy array/list - int array of node indexes - values : numpy array/list - array of node values - - Returns - ------- - - """ - # map from mesh node index to region node index - gi = np.zeros(self.support.n_nodes) - gi[:] = -1 - gi[self.region] = np.arange(0, self.dof) - idc = gi[node_idx] - outside = ~(idc == -1) - - self.equal_constraints[name] = { - "A": np.ones(idc[outside].shape[0]), - "B": values[outside], - "col": idc[outside], - # "w": w, - "row": np.arange(self.eq_const_c, self.eq_const_c + idc[outside].shape[0]), - } - self.eq_const_c += idc[outside].shape[0] - - def add_tangent_constraints(self, w=1.0): - r"""Add tangent constraints to the interpolation system. - - Adds constraints of the form :math:`f(X) \cdot T = 0` where T is the - tangent vector at location X. - - Parameters - ---------- - w : float, optional - Weight for the tangent constraints, by default 1.0 - - Notes - ----- - Tangent constraints ensure that the scalar field gradient is perpendicular - to specified tangent directions, typically used for structural measurements. - """ - - """ - points = self.get_tangent_constraints() - if points.shape[0] > 1: - self.add_gradient_orthogonal_constraints(points[:, :3], points[:, 3:6], w) - - def build_matrix(self): - """Assemble constraints into interpolation matrix. - - Adds equality constraints using Lagrange multipliers if necessary. - - Returns - ------- - tuple of (sparse.csr_matrix, np.ndarray) - Interpolation matrix A and constraint vector B - """ - - mats = [] - bs = [] - for c in self.constraints.values(): - if len(c["w"]) == 0: - continue - mats.append(c['matrix'].multiply(c['w'][:, None])) - bs.append(c['b'] * c['w']) - A = sparse.vstack(mats) - logger.info(f"Interpolation matrix is {A.shape[0]} x {A.shape[1]}") - - B = np.hstack(bs) - return A, B - - def add_equality_block(self, A, B): - """Add equality constraints to the interpolation system. 
- - Solves constrained least squares using Lagrange multipliers: - | ATA CT | |c| = |ATB| - | C 0 | |y| | d | - - Parameters - ---------- - A : sparse matrix - The interpolation matrix - B : np.ndarray - The constraint vector - - Notes - ----- - Where A is the interpolation matrix, C is the equality constraint matrix, - ATB are the interpolation constraints to be honoured in a least squares sense, - d are the equality constraints, c are the node values and y are the - Lagrange multipliers. - """ - if len(self.equal_constraints) > 0: - ATA = A.T.dot(A) - ATB = A.T.dot(B) - logger.info(f"Equality block is {self.eq_const_c} x {self.dof}") - # solving constrained least squares using - # | ATA CT | |c| = b - # | C 0 | |y| d - # where A is the interpoaltion matrix - # C is the equality constraint matrix - # b is the interpolation constraints to be honoured - # in a least squares sense - # and d are the equality constraints - # c are the node values and y are the - # lagrange multipliers# - a = [] - rows = [] - cols = [] - b = [] - for c in self.equal_constraints.values(): - b.extend((c["B"]).tolist()) - aa = c["A"].flatten() - mask = aa == 0 - a.extend(aa[~mask].tolist()) - rows.extend(c["row"].flatten()[~mask].tolist()) - cols.extend(c["col"].flatten()[~mask].tolist()) - - C = sparse.coo_matrix( - (np.array(a), (np.array(rows), cols)), - shape=(self.eq_const_c, self.dof), - dtype=float, - ).tocsr() - - d = np.array(b) - ATA = sparse.bmat([[ATA, C.T], [C, None]]) - ATB = np.hstack([ATB, d]) - - return ATA, ATB - - def build_inequality_matrix(self): - mats = [] - bounds = [] - for c in self.ineq_constraints.values(): - mats.append(c['matrix']) - bounds.append(c['bounds']) - if len(mats) == 0: - return sparse.csr_matrix((0, self.dof), dtype=float), np.zeros((0, 3)) - Q = sparse.vstack(mats) - bounds = np.vstack(bounds) - return Q, bounds - - def solve_system( - self, - solver: Optional[Union[Callable[[sparse.csr_matrix, np.ndarray], np.ndarray], str]] = None, - tol: Optional[float] = None, - solver_kwargs: dict = {}, - ) -> bool: - """ - Main entry point to run the solver and update the node value - attribute for the - discreteinterpolator class - - Parameters - ---------- - solver : string/callable - solver 'cg' conjugate gradient, 'lsmr' or callable function - solver_kwargs - kwargs for solver check scipy documentation for more information - - Returns - ------- - bool - True if the interpolation is run - - """ - if not self._pre_solve(): - raise ValueError("Pre solve failed") - - A, b = self.build_matrix() - Q, bounds = self.build_inequality_matrix() - if callable(solver): - logger.warning('Using custom solver') - self.c = solver(A.tocsr(), b) - self.up_to_date = True - elif isinstance(solver, str) or solver is None: - if solver not in ['cg', 'lsmr', 'admm']: - logger.warning( - f'Unknown solver {solver} using cg. \n Available solvers are cg and lsmr or a custom solver as a callable function' - ) - solver = 'cg' - if solver == 'cg': - logger.info("Solving using cg") - if 'atol' not in solver_kwargs or 'rtol' not in solver_kwargs: - if tol is not None: - solver_kwargs['atol'] = tol - - logger.info(f"Solver kwargs: {solver_kwargs}") - - res = sparse.linalg.cg(A.T @ A, A.T @ b, **solver_kwargs) - if res[1] > 0: - logger.warning( - f'CG reached iteration limit ({res[1]})and did not converge, check input data. 
Setting solution to last iteration' - ) - self.c = res[0] - self.up_to_date = True - - elif solver == 'lsmr': - logger.info("Solving using lsmr") - if 'atol' not in solver_kwargs: - if tol is not None: - solver_kwargs['atol'] = tol - if 'btol' not in solver_kwargs: - if tol is not None: - solver_kwargs['btol'] = tol - logger.info(f"Solver kwargs: {solver_kwargs}") - res = sparse.linalg.lsmr(A, b, **solver_kwargs) - if res[1] == 1 or res[1] == 4 or res[1] == 2 or res[1] == 5: - self.c = res[0] - elif res[1] == 0: - logger.warning("Solution to least squares problem is all zeros, check input data") - elif res[1] == 3 or res[1] == 6: - logger.warning("COND(A) seems to be greater than CONLIM, check input data") - # self.c = res[0] - elif res[1] == 7: - logger.warning( - "LSMR reached iteration limit and did not converge, check input data. Setting solution to last iteration" - ) - self.c = res[0] - self.up_to_date = True - - elif solver == 'admm': - logger.info("Solving using admm") - - if 'x0' in solver_kwargs: - x0 = solver_kwargs['x0'](self.support) - else: - x0 = np.zeros(A.shape[1]) - solver_kwargs.pop('x0', None) - if Q is None: - logger.warning("No inequality constraints, using lsmr") - return self.solve_system('lsmr', solver_kwargs) - - try: - from loopsolver import admm_solve - - try: - linsys_solver = solver_kwargs.pop('linsys_solver', 'lsmr') - res = admm_solve( - A, - b, - Q, - bounds, - x0=x0, - admm_weight=solver_kwargs.pop('admm_weight', 0.01), - nmajor=solver_kwargs.pop('nmajor', 200), - linsys_solver_kwargs=solver_kwargs, - linsys_solver=linsys_solver, - ) - self.c = res - self.up_to_date = True - except ValueError as e: - logger.error(f"ADMM solver failed: {e}") - self.up_to_date = False - except ImportError: - logger.warning( - "Cannot import admm solver. Please install loopsolver or use lsmr or cg" - ) - self.up_to_date = False - else: - logger.error(f"Unknown solver {solver}") - self.up_to_date = False - # self._post_solve() - return self.up_to_date - - def update(self) -> bool: - """ - Check if the solver is up to date, if not rerun interpolation using - the previously used solver. If the interpolation has not been run - before it will - return False - - Returns - ------- - bool - - """ - if self.solver is None: - logging.debug("Cannot rerun interpolator") - return False - if not self.up_to_date: - self.setup_interpolator() - self.up_to_date = self.solve_system(self.solver) - return self.up_to_date - - def evaluate_value(self, locations: np.ndarray) -> np.ndarray: - """Evaluate the value of the interpolator at given locations. - - Parameters - ---------- - locations : np.ndarray - Array of xyz coordinates where the interpolator should be evaluated - - Returns - ------- - np.ndarray - Values of the interpolator at the evaluation points - """ - self.update() - evaluation_points = np.array(locations) - evaluated = np.zeros(evaluation_points.shape[0]) - mask = np.any(evaluation_points == np.nan, axis=1) - - if evaluation_points[~mask, :].shape[0] > 0: - evaluated[~mask] = self.support.evaluate_value(evaluation_points[~mask], self.c) - return evaluated - - def evaluate_gradient(self, locations: np.ndarray) -> np.ndarray: - """Evaluate the gradient of the scalar field at the given locations. 
- - Parameters - ---------- - locations : np.ndarray - Array of xyz coordinates where gradient should be evaluated - - Returns - ------- - np.ndarray - Gradient vectors at the evaluation points with shape (n_points, 3) - """ - self.update() - if locations.shape[0] > 0: - return self.support.evaluate_gradient(locations, self.c) - return np.zeros((0, 3)) - - def to_dict(self): - """Convert the interpolator to a dictionary representation. - - Returns - ------- - dict - Dictionary containing the interpolator state and configuration - """ - return { - "type": self.type.name, - "support": self.support.to_dict(), - "c": self.c, - **super().to_dict(), - # 'region_function':self.region_function, - } - - def vtk(self): - """Get a VTK representation of the interpolator. - - Returns - ------- - object - VTK object containing the support structure with solution values - """ - return self.support.vtk({'c': self.c}) -