diff --git a/docs/src/fitting/constraints.rst b/docs/src/fitting/constraints.rst index 78b4e1ff..d92c87c2 100644 --- a/docs/src/fitting/constraints.rst +++ b/docs/src/fitting/constraints.rst @@ -19,7 +19,7 @@ Constraints on Parameters Constraints on Fitting ^^^^^^^^^^^^^^^^^^^^^^ -:class:`easyscience.Fitting.Fitting.Fitter` has the ability to evaluate user supplied constraints which effect the value of both fixed and non-fixed parameters. A good example of one such use case would be the ratio between two parameters, where you would create a :class:`easyscience.Fitting.Constraints.ObjConstraint`. +:class:`easyscience.fitting.Fitter` has the ability to evaluate user supplied constraints which effect the value of both fixed and non-fixed parameters. A good example of one such use case would be the ratio between two parameters, where you would create a :class:`easyscience.fitting.Constraints.ObjConstraint`. Using constraints ----------------- @@ -28,7 +28,7 @@ A constraint can be used in one of three ways; Assignment to a parameter, assign .. code-block:: python - from easyscience.Fitting.Constraints import NumericConstraint + from easyscience.fitting.Constraints import NumericConstraint from easyscience.Objects.Base import Parameter # Create an `a < 1` constraint a = Parameter('a', 0.5) @@ -41,7 +41,7 @@ A constraint can be used in one of three ways; Assignment to a parameter, assign Constraint Reference -------------------- -.. minigallery:: easyscience.Fitting.Constraints.NumericConstraint +.. minigallery:: easyscience.fitting.Constraints.NumericConstraint :add-heading: Examples using `Constraints` Built-in constraints @@ -49,27 +49,27 @@ Built-in constraints These are the built in constraints which you can use -.. autoclass:: easyscience.Fitting.Constraints.SelfConstraint +.. autoclass:: easyscience.fitting.Constraints.SelfConstraint :members: +enabled -.. autoclass:: easyscience.Fitting.Constraints.NumericConstraint +.. autoclass:: easyscience.fitting.Constraints.NumericConstraint :members: +enabled -.. autoclass:: easyscience.Fitting.Constraints.ObjConstraint +.. autoclass:: easyscience.fitting.Constraints.ObjConstraint :members: +enabled -.. autoclass:: easyscience.Fitting.Constraints.FunctionalConstraint +.. autoclass:: easyscience.fitting.Constraints.FunctionalConstraint :members: +enabled -.. autoclass:: easyscience.Fitting.Constraints.MultiObjConstraint +.. autoclass:: easyscience.fitting.Constraints.MultiObjConstraint :members: +enabled User created constraints ^^^^^^^^^^^^^^^^^^^^^^^^ -You can also make your own constraints by subclassing the :class:`easyscience.Fitting.Constraints.ConstraintBase` class. For this at a minimum the abstract methods ``_parse_operator`` and ``__repr__`` need to be written. +You can also make your own constraints by subclassing the :class:`easyscience.fitting.Constraints.ConstraintBase` class. For this at a minimum the abstract methods ``_parse_operator`` and ``__repr__`` need to be written. -.. autoclass:: easyscience.Fitting.Constraints.ConstraintBase +.. 
autoclass:: easyscience.fitting.Constraints.ConstraintBase :members: :private-members: :special-members: __repr__ \ No newline at end of file diff --git a/examples_old/dataset_examples.ipynb b/examples_old/dataset_examples.ipynb index 65b0a003..96b0cd19 100644 --- a/examples_old/dataset_examples.ipynb +++ b/examples_old/dataset_examples.ipynb @@ -9,7 +9,7 @@ "import numpy as np\n", "from easyscience.Datasets.xarray import xr\n", "from easyscience.Objects.Base import Parameter, BaseObj\n", - "from easyscience.Fitting.Fitting import Fitter" + "from easyscience.fitting import Fitter" ] }, { diff --git a/examples_old/dimer_example.ipynb b/examples_old/dimer_example.ipynb index bb708115..fc18f014 100644 --- a/examples_old/dimer_example.ipynb +++ b/examples_old/dimer_example.ipynb @@ -18,7 +18,7 @@ "import numpy as np\n", "from easyscience.Datasets.xarray import xr\n", "from easyscience.Objects.Base import Parameter, BaseObj\n", - "from easyscience.Fitting.Fitting import Fitter" + "from easyscience.fitting import Fitter" ] }, { @@ -914,7 +914,7 @@ "metadata": {}, "outputs": [], "source": [ - "from easyscience.Fitting.Fitting import Fitter\n", + "from easyscience.fitting import Fitter\n", "f = Fitter()\n", "f.initialize(sl, sl.dispersion)" ] diff --git a/examples_old/example1_dream.py b/examples_old/example1_dream.py index f485d058..bc80a125 100644 --- a/examples_old/example1_dream.py +++ b/examples_old/example1_dream.py @@ -19,7 +19,7 @@ def fit_fun(x): f = Fitter() f.initialize(b, fit_fun) -f.switch_engine("bumps") +f.switch_minimizer("bumps") x = np.array([1, 2, 3]) y = np.array([2, 4, 6]) - 1 diff --git a/examples_old/example5_broken.py b/examples_old/example5_broken.py index 17abd5d9..c88451d3 100644 --- a/examples_old/example5_broken.py +++ b/examples_old/example5_broken.py @@ -357,7 +357,7 @@ def __repr__(self): a = line.c # Now lets change fitting engine -f.switch_engine("bumps") +f.switch_minimizer("bumps") # Reset the values so we don't cheat line.m = 1 line.c = 0 diff --git a/examples_old/example6_broken.py b/examples_old/example6_broken.py index 0c2b9c0d..88c13a65 100644 --- a/examples_old/example6_broken.py +++ b/examples_old/example6_broken.py @@ -436,7 +436,7 @@ def __repr__(self): print(hybrid) # Now lets change fitting engine -f.switch_engine("bumps") +f.switch_minimizer("bumps") # Reset the values so we don't cheat hybrid.m = 1 hybrid.c = 0 diff --git a/examples_old/example_dataset2pt2.py b/examples_old/example_dataset2pt2.py index c33c9a91..20d5950d 100644 --- a/examples_old/example_dataset2pt2.py +++ b/examples_old/example_dataset2pt2.py @@ -43,7 +43,7 @@ def fit_fun(x, *args, **kwargs): b.m = m_starting_point b.c = c_starting_point - f.switch_engine(minimizer) + f.switch_minimizer(minimizer) f_res = d['y'].easyscience.fit(f, vectorize=True) print(f_res.p) diff --git a/examples_old/example_dataset2pt2_broken.py b/examples_old/example_dataset2pt2_broken.py index b56ea7ae..ecce0408 100644 --- a/examples_old/example_dataset2pt2_broken.py +++ b/examples_old/example_dataset2pt2_broken.py @@ -43,7 +43,7 @@ def fit_fun(x, *args, **kwargs): b.m = m_starting_point b.c = c_starting_point - f.switch_engine(minimizer) + f.switch_minimizer(minimizer) f_res = d['y'].easyscience.fit(f, vectorize=True) print(f_res.p) diff --git a/pyproject.toml b/pyproject.toml index 76304217..9d76e95f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,10 +34,11 @@ dependencies = [ "bumps", "DFO-LS", "lmfit", - "numpy", + "numpy==1.26", # Should be updated to numpy 2.0 "pint", "uncertainties", - "xarray" 
+ "xarray", + "pint==0.23" # Only to ensure that unit is reported as dimensionless rather than empty string ] [project.optional-dependencies] diff --git a/src/easyscience/Datasets/xarray.py b/src/easyscience/Datasets/xarray.py index 36633213..b8b27c76 100644 --- a/src/easyscience/Datasets/xarray.py +++ b/src/easyscience/Datasets/xarray.py @@ -20,7 +20,7 @@ import xarray as xr from easyscience import ureg -from easyscience.Fitting import FitResults +from easyscience.fitting import FitResults T_ = TypeVar('T_') @@ -371,13 +371,13 @@ def fit( **kwargs, ) -> List[FitResults]: """ - Perform a fit on one or more DataArrays. This fit utilises a given fitter from `EasyScience.Fitting.Fitter`, though + Perform a fit on one or more DataArrays. This fit utilises a given fitter from `EasyScience.fitting.Fitter`, though there are a few differences to a standard EasyScience fit. In particular, key-word arguments to control the optimisation algorithm go in the `fit_kwargs` dictionary, fit function key-word arguments go in the `fn_kwargs` and given key-word arguments control the `xarray.apply_ufunc` function. :param fitter: Fitting object which controls the fitting - :type fitter: EasyScience.Fitting.Fitter + :type fitter: EasyScience.fitting.Fitter :param args: Arguments to go to the fit function :type args: Any :param dask: Dask control string. See `xarray.apply_ufunc` documentation @@ -681,13 +681,13 @@ def fit( **kwargs, ) -> FitResults: """ - Perform a fit on the given DataArray. This fit utilises a given fitter from `EasyScience.Fitting.Fitter`, though + Perform a fit on the given DataArray. This fit utilises a given fitter from `EasyScience.fitting.Fitter`, though there are a few differences to a standard EasyScience fit. In particular, key-word arguments to control the optimisation algorithm go in the `fit_kwargs` dictionary, fit function key-word arguments go in the `fn_kwargs` and given key-word arguments control the `xarray.apply_ufunc` function. :param fitter: Fitting object which controls the fitting - :type fitter: EasyScience.Fitting.Fitter + :type fitter: EasyScience.fitting.Fitter :param args: Arguments to go to the fit function :type args: Any :param dask: Dask control string. See `xarray.apply_ufunc` documentation @@ -732,7 +732,7 @@ def local_fit_func(x, *args, **kwargs): # Set the new callable to the fitter and initialize fitter.initialize(fitter.fit_object, local_fit_func) - # Make EasyScience.Fitting.Fitter compatible `x` + # Make EasyScience.fitting.Fitter compatible `x` x_for_fit = xr.concat(bdims, dim='fit_dim') x_for_fit = x_for_fit.stack(all_x=[d.name for d in bdims]) try: @@ -803,7 +803,7 @@ def check_sanity_multiple(fit_results: FitResults, originals: List[xr.DataArray] current_results = fit_results.__class__() # Fill out the basic stuff.... 
current_results.engine_result = fit_results.engine_result - current_results.fitting_engine = fit_results.fitting_engine + current_results.minimizer_engine = fit_results.minimizer_engine current_results.success = fit_results.success current_results.p = fit_results.p current_results.p0 = fit_results.p0 diff --git a/src/easyscience/Fitting/minimizers/__init__.py b/src/easyscience/Fitting/minimizers/__init__.py deleted file mode 100644 index 6afab1c7..00000000 --- a/src/easyscience/Fitting/minimizers/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# SPDX-FileCopyrightText: 2023 EasyScience contributors -# SPDX-License-Identifier: BSD-3-Clause -# © 2021-2023 Contributors to the EasyScience project BV: :rtype: """ # First check if we're already a virtual object - if getattr(obj, "_is_virtual", False): + if getattr(obj, '_is_virtual', False): new_obj = deepcopy(obj) old_obj = obj._borg.map.get_item_by_key(obj._derived_from) - constraint = ObjConstraint(new_obj, "", old_obj) + constraint = ObjConstraint(new_obj, '', old_obj) constraint.external = True - old_obj._constraints["virtual"][ - str(obj._borg.map.convert_id(new_obj).int) - ] = constraint - new_obj._constraints["builtin"] = dict() + old_obj._constraints['virtual'][str(obj._borg.map.convert_id(new_obj).int)] = constraint + new_obj._constraints['builtin'] = dict() # setattr(new_obj, "__previous_set", getattr(olobj, "__previous_set", None)) weakref.finalize( new_obj, @@ -144,42 +137,40 @@ def virtualizer(obj: BV) -> BV: return new_obj # The supplied class - klass = getattr(obj, "__old_class__", obj.__class__) + klass = getattr(obj, '__old_class__', obj.__class__) virtual_options = { - "_is_virtual": True, - "is_virtual": property(fget=lambda self: self._is_virtual), - "_derived_from": property(fget=lambda self: self._borg.map.convert_id(obj).int), - "__non_virtual_class__": klass, - "realize": realizer, - "relalize_component": component_realizer, + '_is_virtual': True, + 'is_virtual': property(fget=lambda self: self._is_virtual), + '_derived_from': property(fget=lambda self: self._borg.map.convert_id(obj).int), + '__non_virtual_class__': klass, + 'realize': realizer, + 'relalize_component': component_realizer, } import easyscience.Objects.Variable as ec_var if klass in ec_var.__dict__.values(): # is_variable check - virtual_options["fixed"] = property( + virtual_options['fixed'] = property( fget=lambda self: self._fixed, - fset=lambda self, value: raise_( - AttributeError("Virtual parameters cannot be fixed") - ), + fset=lambda self, value: raise_(AttributeError('Virtual parameters cannot be fixed')), ) # Generate a new class - cls = type("Virtual" + klass.__name__, (klass,), virtual_options) + cls = type('Virtual' + klass.__name__, (klass,), virtual_options) # Determine what to do next. args = [] # If `obj` is a parameter or descriptor etc, then simple mods. - if hasattr(obj, "_constructor"): + if hasattr(obj, '_constructor'): # All Variables are based on the Descriptor. 
d = obj.encode_data() - if hasattr(d, "fixed"): - d["fixed"] = True + if hasattr(d, 'fixed'): + d['fixed'] = True v_p = cls(**d) v_p._enabled = False - constraint = ObjConstraint(v_p, "", obj) + constraint = ObjConstraint(v_p, '', obj) constraint.external = True - obj._constraints["virtual"][str(cls._borg.map.convert_id(v_p).int)] = constraint - v_p._constraints["builtin"] = dict() - setattr(v_p, "__previous_set", getattr(obj, "__previous_set", None)) + obj._constraints['virtual'][str(cls._borg.map.convert_id(v_p).int)] = constraint + v_p._constraints['builtin'] = dict() + setattr(v_p, '__previous_set', getattr(obj, '__previous_set', None)) weakref.finalize( v_p, _remover, diff --git a/src/easyscience/__init__.py b/src/easyscience/__init__.py index 91cfb3ac..606c7204 100644 --- a/src/easyscience/__init__.py +++ b/src/easyscience/__init__.py @@ -10,8 +10,6 @@ from easyscience.__version__ import __version__ as __version__ from easyscience.Objects.Borg import Borg -default_fitting_engine = 'lmfit' - ureg = pint.UnitRegistry() borg = Borg() borg.instantiate_stack() diff --git a/src/easyscience/Fitting/Constraints.py b/src/easyscience/fitting/Constraints.py similarity index 97% rename from src/easyscience/Fitting/Constraints.py rename to src/easyscience/fitting/Constraints.py index c2405c32..9d947386 100644 --- a/src/easyscience/Fitting/Constraints.py +++ b/src/easyscience/fitting/Constraints.py @@ -187,7 +187,7 @@ def __init__(self, dependent_obj: V, operator: str, value: Number): :example: .. code-block:: python - from easyscience.Fitting.Constraints import NumericConstraint + from easyscience.fitting.Constraints import NumericConstraint from easyscience.Objects.Base import Parameter # Create an `a < 1` constraint a = Parameter('a', 0.2) @@ -243,7 +243,7 @@ def __init__(self, dependent_obj: V, operator: str, value: str): :example: .. code-block:: python - from easyscience.Fitting.Constraints import SelfConstraint + from easyscience.fitting.Constraints import SelfConstraint from easyscience.Objects.Base import Parameter # Create an `a < a.max` constraint a = Parameter('a', 0.2, max=1) @@ -297,7 +297,7 @@ def __init__(self, dependent_obj: V, operator: str, independent_obj: V): :example: .. code-block:: python - from easyscience.Fitting.Constraints import ObjConstraint + from easyscience.fitting.Constraints import ObjConstraint from easyscience.Objects.Base import Parameter # Create an `a = 2 * b` constraint a = Parameter('a', 0.2) @@ -331,7 +331,7 @@ def __repr__(self) -> str: class MultiObjConstraint(ConstraintBase): """ - A `MultiObjConstraint` is similar to :class:`EasyScience.Fitting.Constraints.ObjConstraint` except that it relates to + A `MultiObjConstraint` is similar to :class:`EasyScience.fitting.Constraints.ObjConstraint` except that it relates to multiple independent objects. """ @@ -343,7 +343,7 @@ def __init__( value: Number, ): """ - A `MultiObjConstraint` is similar to :class:`EasyScience.Fitting.Constraints.ObjConstraint` except that it relates + A `MultiObjConstraint` is similar to :class:`EasyScience.fitting.Constraints.ObjConstraint` except that it relates to one or more independent objects. E.g. @@ -360,7 +360,7 @@ def __init__( .. code-block:: python - from easyscience.Fitting.Constraints import MultiObjConstraint + from easyscience.fitting.Constraints import MultiObjConstraint from easyscience.Objects.Base import Parameter # Create an `a + b = 1` constraint a = Parameter('a', 0.2) @@ -376,7 +376,7 @@ def __init__( .. 
code-block:: python - from easyscience.Fitting.Constraints import MultiObjConstraint + from easyscience.fitting.Constraints import MultiObjConstraint from easyscience.Objects.Base import Parameter # Create an `a + b - 2c = 0` constraint a = Parameter('a', 0.5) @@ -442,7 +442,7 @@ def __init__( .. code-block:: python import numpy as np - from easyscience.Fitting.Constraints import FunctionalConstraint + from easyscience.fitting.Constraints import FunctionalConstraint from easyscience.Objects.Base import Parameter a = Parameter('a', 0.2, max=1) diff --git a/src/easyscience/Fitting/__init__.py b/src/easyscience/fitting/__init__.py similarity index 57% rename from src/easyscience/Fitting/__init__.py rename to src/easyscience/fitting/__init__.py index 4c4f3ac1..698b5831 100644 --- a/src/easyscience/Fitting/__init__.py +++ b/src/easyscience/fitting/__init__.py @@ -1,3 +1,4 @@ from .fitter import Fitter # noqa: F401, E402 from .minimizers.minimizer_base import FitResults # noqa: F401, E402 -from .multi_fitter import MultiFitter # noqa: F401, E402 +# Causes circular import +# from .multi_fitter import MultiFitter # noqa: F401, E402 diff --git a/src/easyscience/Fitting/fitter.py b/src/easyscience/fitting/fitter.py similarity index 53% rename from src/easyscience/Fitting/fitter.py rename to src/easyscience/fitting/fitter.py index dc950eec..07ca844e 100644 --- a/src/easyscience/Fitting/fitter.py +++ b/src/easyscience/fitting/fitter.py @@ -1,193 +1,125 @@ -__author__ = 'github.com/wardsimon' -__version__ = '0.0.1' - -import functools - # SPDX-FileCopyrightText: 2023 EasyScience contributors # SPDX-License-Identifier: BSD-3-Clause # © 2021-2023 Contributors to the EasyScience project Callable: - """ - Simple fit function which injects the real X (independent) values into the - optimizer function. This will also flatten the results if needed. - :param real_x: Independent x parameters to be injected - :param flatten: Should the result be a flat 1D array? - :return: Wrapped optimizer function. - """ - fun = self._fit_function + def fit_constraints(self) -> list: + return self._minimizer.fit_constraints() - @functools.wraps(fun) - def wrapped_fit_function(x, **kwargs): - if real_x is not None: - x = real_x - dependent = fun(x, **kwargs) - if flatten: - dependent = dependent.flatten() - return dependent + def add_fit_constraint(self, constraint) -> None: + self._minimizer.add_fit_constraint(constraint) - return wrapped_fit_function + def remove_fit_constraint(self, index: int) -> None: + self._minimizer.remove_fit_constraint(index) - def initialize(self, fit_object, fit_function: Callable): + def make_model(self, pars=None) -> Callable: + return self._minimizer.make_model(pars) + + def evaluate(self, pars=None) -> np.ndarray: + return self._minimizer.evaluate(pars) + + def convert_to_pars_obj(self, pars) -> object: + return self._minimizer.convert_to_pars_obj(pars) + + # TODO: remove this method when we are ready to adjust the dependent products + def initialize(self, fit_object, fit_function: Callable) -> None: """ Set the model and callable in the calculator interface. :param fit_object: The EasyScience model object :param fit_function: The function to be optimized against. 
- :return: None """ self._fit_object = fit_object self._fit_function = fit_function - self.__initialize() + self._update_minimizer(DEFAULT_MINIMIZER) - def __initialize(self): + # TODO: remove this method when we are ready to adjust the dependent products + def create(self, minimizer_name: str = DEFAULT_MINIMIZER) -> None: """ - The real initialization. Setting the optimizer object properly - :return: None + Create the required minimizer. + :param minimizer_name: The label of the minimization engine to create. """ - self.__engine_obj = self._current_engine(self._fit_object, self.fit_function) - self._is_initialized = True + self._update_minimizer(minimizer_name) - def create(self, engine_name: str = default_fitting_engine): + def switch_minimizer(self, minimizer_name: str) -> None: """ - Create a backend optimization engine. - :param engine_name: The label of the optimization engine to create. - :return: None + Switch minimizer and initialize. + :param minimizer_name: The label of the minimizer to create and instantiate. """ - engines = self.available_engines - if engine_name in engines: - self._current_engine = self._engines[engines.index(engine_name)] - self._is_initialized = False - else: - raise AttributeError(f"The supplied optimizer engine '{engine_name}' is unknown.") + constraints = self._minimizer.fit_constraints() + self._update_minimizer(minimizer_name) + self._minimizer.set_fit_constraint(constraints) - def switch_engine(self, engine_name: str): - """ - Switch backend optimization engines and initialize. - :param engine_name: The label of the optimization engine to create and instantiate. - :return: None - """ - # There isn't any state to carry over - if not self._is_initialized: - raise ReferenceError('The fitting engine must be initialized before switching') - # Constrains are not carried over. Do it manually. - constraints = self.__engine_obj._constraints - self.create(engine_name) - self.__initialize() - self.__engine_obj._constraints = constraints + def _update_minimizer(self, minimizer_name: str) -> None: + minimizer_enum = from_string_to_enum(minimizer_name) + self._minimizer = factory(minimizer_enum=minimizer_enum, fit_object=self._fit_object, fit_function=self.fit_function) + self._name_current_minimizer = minimizer_name @property - def available_engines(self) -> List[str]: + def available_minimizers(self) -> List[str]: """ - Get a list of the names of available fitting engines + Get a list of the names of available fitting minimizers - :return: List of available fitting engines + :return: List of available fitting minimizers :rtype: List[str] """ - if minimizers.engines is None: - raise ImportError('There are no available fitting engines. Install `lmfit` and/or `bumps`') - return [engine.name for engine in minimizers.engines] - - @property - def can_fit(self) -> bool: - """ - Can a fit be performed. i.e has the object been created properly - - :return: Can a fit be performed - :rtype: bool - """ - return self._is_initialized + return [minimize.name for minimize in AvailableMinimizers] @property - def current_engine(self) -> _C: + def minimizer(self) -> MinimizerBase: """ - Get the class object of the current fitting engine. - - :return: Class of the current fitting engine (based on the `FittingTemplate` class) - :rtype: _T - """ - return self._current_engine - - @property - def engine(self) -> _M: - """ - Get the current fitting engine object. + Get the current fitting minimizer object. 
:return: - :rtype: _M + :rtype: MinimizerBase """ - return self.__engine_obj + return self._minimizer @property def fit_function(self) -> Callable: """ - The raw fit function that the optimizer will call (no wrapping) + The raw fit function that the optimizer will call (no wrapping) :return: Raw fit function """ return self._fit_function @fit_function.setter - def fit_function(self, fit_function: Callable): + def fit_function(self, fit_function: Callable) -> None: """ Set the raw fit function to a new one. :param fit_function: New fit function :return: None """ self._fit_function = fit_function - self.__initialize() + self._update_minimizer(self._name_current_minimizer) @property def fit_object(self): @@ -198,32 +130,35 @@ def fit_object(self): return self._fit_object @fit_object.setter - def fit_object(self, fit_object): + def fit_object(self, fit_object) -> None: """ Set the EasyScience object which wil be used as a model :param fit_object: New EasyScience object :return: None """ self._fit_object = fit_object - self.__initialize() + self._update_minimizer(self._name_current_minimizer) - def __pass_through_generator(self, name: str): + def _fit_function_wrapper(self, real_x=None, flatten: bool = True) -> Callable: """ - Attach the attributes of the calculator template to the current fitter instance. - :param name: Attribute name to attach - :return: Wrapped calculator interface object. + Simple fit function which injects the real X (independent) values into the + optimizer function. This will also flatten the results if needed. + :param real_x: Independent x parameters to be injected + :param flatten: Should the result be a flat 1D array? + :return: Wrapped optimizer function. """ - obj = self + fun = self._fit_function - def inner(*args, **kwargs): - if not obj.can_fit: - raise ReferenceError('The fitting engine must first be initialized') - func = getattr(obj.engine, name, None) - if func is None: - raise ValueError('The fitting engine does not have the attribute "{}"'.format(name)) - return func(*args, **kwargs) + @functools.wraps(fun) + def wrapped_fit_function(x, **kwargs): + if real_x is not None: + x = real_x + dependent = fun(x, **kwargs) + if flatten: + dependent = dependent.flatten() + return dependent - return inner + return wrapped_fit_function @property def fit(self) -> Callable: @@ -233,7 +168,7 @@ def fit(self) -> Callable: re-constitute the independent variables and once the fit is completed, reshape the inputs to those expected. """ - @functools.wraps(self.engine.fit) + @functools.wraps(self.minimizer.fit) def inner_fit_callable( x: np.ndarray, y: np.ndarray, @@ -248,29 +183,25 @@ def inner_fit_callable( - FIT = Wrapping the fit function and performing the fit - POST = Reshaping the outputs so it is coherent with the inputs. """ - # Check to see if we can perform a fit - if not self.can_fit: - raise ReferenceError('The fitting engine must first be initialized') - # Precompute - Reshape all independents into the correct dimensionality - x_fit, x_new, y_new, weights, dims, kwargs = self._precompute_reshaping(x, y, weights, vectorized, kwargs) + x_fit, x_new, y_new, weights, dims = self._precompute_reshaping(x, y, weights, vectorized) self._dependent_dims = dims # Fit - fit_fun = self._fit_function + fit_fun_org = self._fit_function fit_fun_wrap = self._fit_function_wrapper(x_new, flatten=True) # This should be wrapped. 
# We change the fit function, so have to reset constraints - constraints = self.__engine_obj._constraints + constraints = self._minimizer.fit_constraints() self.fit_function = fit_fun_wrap - self.__engine_obj._constraints = constraints - f_res = self.engine.fit(x_fit, y_new, weights=weights, **kwargs) + self._minimizer.set_fit_constraint(constraints) + f_res = self.minimizer.fit(x_fit, y_new, weights=weights, **kwargs) # Postcompute - fit_result = self._post_compute_reshaping(f_res, x, y, weights) + fit_result = self._post_compute_reshaping(f_res, x, y) # Reset the function and constrains - self.fit_function = fit_fun - self.__engine_obj._constraints = constraints + self.fit_function = fit_fun_org + self._minimizer.set_fit_constraint(constraints) return fit_result return inner_fit_callable @@ -281,7 +212,6 @@ def _precompute_reshaping( y: np.ndarray, weights: Optional[np.ndarray], vectorized: bool, - kwargs, ): """ Check the dimensions of the inputs and reshape if necessary. @@ -323,10 +253,10 @@ def _precompute_reshaping( weights = np.array(weights).flatten() # Make a 'dummy' x array for the fit function x_for_fit = np.array(range(y_new.size)) - return x_for_fit, x_new, y_new, weights, x_shape, kwargs + return x_for_fit, x_new, y_new, weights, x_shape @staticmethod - def _post_compute_reshaping(fit_result: FitResults, x: np.ndarray, y: np.ndarray, weights: np.ndarray) -> FitResults: + def _post_compute_reshaping(fit_result: FitResults, x: np.ndarray, y: np.ndarray) -> FitResults: """ Reshape the output of the fitter into the correct dimensions. :param fit_result: Output from the fitter @@ -334,8 +264,8 @@ def _post_compute_reshaping(fit_result: FitResults, x: np.ndarray, y: np.ndarray :param y: Input y dependent :return: Reshaped Fit Results """ - setattr(fit_result, 'x', x) - setattr(fit_result, 'y_obs', y) - setattr(fit_result, 'y_calc', np.reshape(fit_result.y_calc, y.shape)) - setattr(fit_result, 'y_err', np.reshape(fit_result.y_err, y.shape)) + fit_result.x = x + fit_result.y_obs = y + fit_result.y_calc = np.reshape(fit_result.y_calc, y.shape) + fit_result.y_err = np.reshape(fit_result.y_err, y.shape) return fit_result diff --git a/src/easyscience/fitting/minimizers/__init__.py b/src/easyscience/fitting/minimizers/__init__.py new file mode 100644 index 00000000..2d5198d7 --- /dev/null +++ b/src/easyscience/fitting/minimizers/__init__.py @@ -0,0 +1,7 @@ +# SPDX-FileCopyrightText: 2023 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +# © 2021-2023 Contributors to the EasyScience project AvailableMinimizers: + if minimizer_name == 'lmfit': + minmizer_enum = AvailableMinimizers.LMFit + elif minimizer_name == 'lmfit-leastsq': + minmizer_enum = AvailableMinimizers.LMFit_leastsq + elif minimizer_name == 'lmfit-powell': + minmizer_enum = AvailableMinimizers.LMFit_powell + elif minimizer_name == 'lmfit-cobyla': + minmizer_enum = AvailableMinimizers.LMFit_cobyla + + elif minimizer_name == 'bumps': + minmizer_enum = AvailableMinimizers.Bumps + elif minimizer_name == 'bumps-simplex': + minmizer_enum = AvailableMinimizers.Bumps_simplex + elif minimizer_name == 'bumps-newton': + minmizer_enum = AvailableMinimizers.Bumps_newton + elif minimizer_name == 'bumps-lm': + minmizer_enum = AvailableMinimizers.Bumps_lm + + elif minimizer_name == 'dfo': + minmizer_enum = AvailableMinimizers.DFO + elif minimizer_name == 'dfo-leastsq': + minmizer_enum = AvailableMinimizers.DFO_leastsq + + return minmizer_enum + + +def factory(minimizer_enum: AvailableMinimizers, fit_object, fit_function: 
Callable) -> MinimizerBase: + if minimizer_enum == AvailableMinimizers.LMFit: + minimizer = LMFit(obj=fit_object, fit_function=fit_function, method='leastsq') + elif minimizer_enum == AvailableMinimizers.LMFit_leastsq: + minimizer = LMFit(obj=fit_object, fit_function=fit_function, method='leastsq') + elif minimizer_enum == AvailableMinimizers.LMFit_powell: + minimizer = LMFit(obj=fit_object, fit_function=fit_function, method='powell') + elif minimizer_enum == AvailableMinimizers.LMFit_cobyla: + minimizer = LMFit(obj=fit_object, fit_function=fit_function, method='cobyla') + + elif minimizer_enum == AvailableMinimizers.Bumps: + minimizer = Bumps(obj=fit_object, fit_function=fit_function, method='amoeba') + elif minimizer_enum == AvailableMinimizers.Bumps_simplex: + minimizer = Bumps(obj=fit_object, fit_function=fit_function, method='amoeba') + elif minimizer_enum == AvailableMinimizers.Bumps_newton: + minimizer = Bumps(obj=fit_object, fit_function=fit_function, method='newton') + elif minimizer_enum == AvailableMinimizers.Bumps_lm: + minimizer = Bumps(obj=fit_object, fit_function=fit_function, method='lm') + + elif minimizer_enum == AvailableMinimizers.DFO: + minimizer = DFO(obj=fit_object, fit_function=fit_function, method='leastsq') + elif minimizer_enum == AvailableMinimizers.DFO_leastsq: + minimizer = DFO(obj=fit_object, fit_function=fit_function, method='leastsq') + + return minimizer diff --git a/src/easyscience/Fitting/minimizers/minimizer_base.py b/src/easyscience/fitting/minimizers/minimizer_base.py similarity index 87% rename from src/easyscience/Fitting/minimizers/minimizer_base.py rename to src/easyscience/fitting/minimizers/minimizer_base.py index 95ff0525..4c28c878 100644 --- a/src/easyscience/Fitting/minimizers/minimizer_base.py +++ b/src/easyscience/fitting/minimizers/minimizer_base.py @@ -1,6 +1,3 @@ -__author__ = 'github.com/wardsimon' -__version__ = '0.1.0' - # SPDX-FileCopyrightText: 2023 EasyScience contributors # SPDX-License-Identifier: BSD-3-Clause # © 2021-2023 Contributors to the EasyScience project list: @@ -51,6 +37,9 @@ def all_constraints(self) -> list: def fit_constraints(self) -> list: return self._constraints + def set_fit_constraint(self, constraints): + self._constraints = constraints + def add_fit_constraint(self, constraint): self._constraints.append(constraint) @@ -169,15 +158,6 @@ def _gen_fit_results(self, fit_results, **kwargs) -> 'FitResults': :rtype: FitResults """ - @abstractmethod - def available_methods(self) -> List[str]: - """ - Generate a list of available methods - - :return: List of available methods for minimization - :rtype: List[str] - """ - @staticmethod def _error_from_jacobian(jacobian: np.ndarray, residuals: np.ndarray, confidence: float = 0.95) -> np.ndarray: from scipy import stats diff --git a/src/easyscience/Fitting/minimizers/minimizer_bumps.py b/src/easyscience/fitting/minimizers/minimizer_bumps.py similarity index 97% rename from src/easyscience/Fitting/minimizers/minimizer_bumps.py rename to src/easyscience/fitting/minimizers/minimizer_bumps.py index 05000fd7..0c0862d5 100644 --- a/src/easyscience/Fitting/minimizers/minimizer_bumps.py +++ b/src/easyscience/fitting/minimizers/minimizer_bumps.py @@ -2,9 +2,6 @@ # SPDX-License-Identifier: BSD-3-Clause # © 2021-2023 Contributors to the EasyScience project FitResults: results.y_err = self._cached_model.dy # results.residual = results.y_obs - results.y_calc # results.goodness_of_fit = np.sum(results.residual**2) - results.fitting_engine = self.__class__ + results.minimizer_engine = 
self.__class__ results.fit_args = None results.engine_result = fit_results # results.check_sanity() diff --git a/src/easyscience/Fitting/minimizers/minimizer_dfo.py b/src/easyscience/fitting/minimizers/minimizer_dfo.py similarity index 96% rename from src/easyscience/Fitting/minimizers/minimizer_dfo.py rename to src/easyscience/fitting/minimizers/minimizer_dfo.py index 48afbffe..ae3ba93f 100644 --- a/src/easyscience/Fitting/minimizers/minimizer_dfo.py +++ b/src/easyscience/fitting/minimizers/minimizer_dfo.py @@ -2,10 +2,7 @@ # SPDX-License-Identifier: BSD-3-Clause # © 2021-2023 Contributors to the EasyScience project Callable: @@ -153,8 +149,9 @@ def fit( :return: Fit results :rtype: ModelResult """ - default_method = {} + if self._method is not None: + default_method = {'method': self._method} if method is not None and method in self.available_methods(): default_method['method'] = method @@ -252,7 +249,7 @@ def _gen_fit_results(self, fit_results, weights, **kwargs) -> FitResults: # results.residual = results.y_obs - results.y_calc # results.goodness_of_fit = fit_results.f - results.fitting_engine = self.__class__ + results.minimizer_engine = self.__class__ results.fit_args = None # results.check_sanity() diff --git a/src/easyscience/Fitting/minimizers/minimizer_lmfit.py b/src/easyscience/fitting/minimizers/minimizer_lmfit.py similarity index 98% rename from src/easyscience/Fitting/minimizers/minimizer_lmfit.py rename to src/easyscience/fitting/minimizers/minimizer_lmfit.py index 0adad4ad..7f5e5910 100644 --- a/src/easyscience/Fitting/minimizers/minimizer_lmfit.py +++ b/src/easyscience/fitting/minimizers/minimizer_lmfit.py @@ -2,9 +2,6 @@ # SPDX-License-Identifier: BSD-3-Clause # © 2021-2023 Contributors to the EasyScience project LMModel: """ @@ -160,6 +156,8 @@ def fit( :rtype: ModelResult """ default_method = {} + if self._method is not None: + default_method = {'method': self._method} if method is not None and method in self.available_methods(): default_method['method'] = method @@ -277,7 +275,7 @@ def _gen_fit_results(self, fit_results: ModelResult, **kwargs) -> FitResults: # results.goodness_of_fit = fit_results.chisqr results.y_calc = fit_results.best_fit results.y_err = 1 / fit_results.weights - results.fitting_engine = self.__class__ + results.minimizer_engine = self.__class__ results.fit_args = None results.engine_result = fit_results diff --git a/src/easyscience/Fitting/minimizers/utils.py b/src/easyscience/fitting/minimizers/utils.py similarity index 96% rename from src/easyscience/Fitting/minimizers/utils.py rename to src/easyscience/fitting/minimizers/utils.py index ff56e87a..59e0792e 100644 --- a/src/easyscience/Fitting/minimizers/utils.py +++ b/src/easyscience/fitting/minimizers/utils.py @@ -8,7 +8,7 @@ class FitResults: __slots__ = [ 'success', - 'fitting_engine', + 'minimizer_engine', 'fit_args', 'p', 'p0', @@ -23,7 +23,7 @@ class FitResults: def __init__(self): self.success = False - self.fitting_engine = None + self.minimizer_engine = None self.fit_args = {} self.p = {} self.p0 = {} diff --git a/src/easyscience/Fitting/multi_fitter.py b/src/easyscience/fitting/multi_fitter.py similarity index 90% rename from src/easyscience/Fitting/multi_fitter.py rename to src/easyscience/fitting/multi_fitter.py index f7f89fd8..c812ff0e 100644 --- a/src/easyscience/Fitting/multi_fitter.py +++ b/src/easyscience/fitting/multi_fitter.py @@ -1,6 +1,3 @@ -__author__ = 'github.com/wardsimon' -__version__ = '0.0.1' - # SPDX-FileCopyrightText: 2023 EasyScience contributors # 
SPDX-License-Identifier: BSD-3-Clause # © 2021-2023 Contributors to the EasyScience project List[FitResults]: """ Take a fit results object and split it into n chuncks based on the size of the x, y inputs @@ -122,9 +117,9 @@ def _post_compute_reshaping( current_results = cls() ep = sp + int(np.array(self._dependent_dims[idx]).prod()) - # Fill out the new result obj (see EasyScience.Fitting.Fitting_template.FitResults) + # Fill out the new result obj (see EasyScience.fitting.Fitting_template.FitResults) current_results.success = fit_result_obj.success - current_results.fitting_engine = fit_result_obj.fitting_engine + current_results.minimizer_engine = fit_result_obj.minimizer_engine current_results.p = fit_result_obj.p current_results.p0 = fit_result_obj.p0 current_results.x = this_x diff --git a/tests/integration_tests/Fitting/test_fitter.py b/tests/integration_tests/Fitting/test_fitter.py index 0180ef36..2449fccb 100644 --- a/tests/integration_tests/Fitting/test_fitter.py +++ b/tests/integration_tests/Fitting/test_fitter.py @@ -8,9 +8,9 @@ import pytest import numpy as np -from easyscience.Fitting.Constraints import ObjConstraint -from easyscience.Fitting.fitter import Fitter -from easyscience.Fitting.minimizers import FitError +from easyscience.fitting.Constraints import ObjConstraint +from easyscience.fitting.fitter import Fitter +from easyscience.fitting.minimizers import FitError from easyscience.Objects.ObjectClasses import BaseObj from easyscience.Objects.ObjectClasses import Parameter @@ -78,7 +78,7 @@ def check_fit_results(result, sp_sin, ref_sin, x, **kwargs): @pytest.mark.parametrize("with_errors", [False, True]) -@pytest.mark.parametrize("fit_engine", [None, "lmfit", "bumps", "dfo_ls"]) +@pytest.mark.parametrize("fit_engine", [None, "lmfit", "bumps", "dfo"]) def test_basic_fit(fit_engine, with_errors): ref_sin = AbsSin(0.2, np.pi) sp_sin = AbsSin(0.354, 3.05) @@ -92,7 +92,7 @@ def test_basic_fit(fit_engine, with_errors): f = Fitter(sp_sin, sp_sin) if fit_engine is not None: try: - f.switch_engine(fit_engine) + f.switch_minimizer(fit_engine) except AttributeError: pytest.skip(msg=f"{fit_engine} is not installed") args = [x, y] @@ -102,12 +102,12 @@ def test_basic_fit(fit_engine, with_errors): result = f.fit(*args, **kwargs) if fit_engine is not None: - assert result.fitting_engine.name == fit_engine + assert result.minimizer_engine.wrapping == fit_engine assert sp_sin.phase.raw_value == pytest.approx(ref_sin.phase.raw_value, rel=1e-3) assert sp_sin.offset.raw_value == pytest.approx(ref_sin.offset.raw_value, rel=1e-3) -@pytest.mark.parametrize("fit_engine", [None, "lmfit", "bumps", "dfo_ls"]) +@pytest.mark.parametrize("fit_engine", [None, "lmfit", "bumps", "dfo"]) def test_fit_result(fit_engine): ref_sin = AbsSin(0.2, np.pi) sp_sin = AbsSin(0.354, 3.05) @@ -131,7 +131,7 @@ def test_fit_result(fit_engine): if fit_engine is not None: try: - f.switch_engine(fit_engine) + f.switch_minimizer(fit_engine) except AttributeError: pytest.skip(msg=f"{fit_engine} is not installed") @@ -151,12 +151,12 @@ def test_lmfit_methods(fit_method): sp_sin.phase.fixed = False f = Fitter(sp_sin, sp_sin) - assert fit_method in f.available_methods() + assert fit_method in f._minimizer.available_methods() result = f.fit(x, y, method=fit_method) check_fit_results(result, sp_sin, ref_sin, x) -@pytest.mark.xfail(reason="known bumps issue") +#@pytest.mark.xfail(reason="known bumps issue") @pytest.mark.parametrize("fit_method", ["newton", "lm"]) def test_bumps_methods(fit_method): ref_sin = AbsSin(0.2, np.pi) 
@@ -169,13 +169,13 @@ def test_bumps_methods(fit_method): sp_sin.phase.fixed = False f = Fitter(sp_sin, sp_sin) - f.switch_engine("bumps") - assert fit_method in f.available_methods() + f.switch_minimizer("bumps") + assert fit_method in f._minimizer.available_methods() result = f.fit(x, y, method=fit_method) check_fit_results(result, sp_sin, ref_sin, x) -@pytest.mark.parametrize("fit_engine", ["lmfit", "bumps", "dfo_ls"]) +@pytest.mark.parametrize("fit_engine", ["lmfit", "bumps", "dfo"]) def test_fit_constraints(fit_engine): ref_sin = AbsSin(np.pi * 0.45, 0.45 * np.pi * 0.5) sp_sin = AbsSin(1, 0.5) @@ -193,7 +193,7 @@ def test_fit_constraints(fit_engine): if fit_engine is not None: try: - f.switch_engine(fit_engine) + f.switch_minimizer(fit_engine) except AttributeError: pytest.skip(msg=f"{fit_engine} is not installed") @@ -205,7 +205,7 @@ def test_fit_constraints(fit_engine): @pytest.mark.parametrize("with_errors", [False, True]) -@pytest.mark.parametrize("fit_engine", [None, "lmfit", "bumps", "dfo_ls"]) +@pytest.mark.parametrize("fit_engine", [None, "lmfit", "bumps", "dfo"]) def test_2D_vectorized(fit_engine, with_errors): x = np.linspace(0, 5, 200) mm = AbsSin2D(0.3, 1.6) @@ -217,7 +217,7 @@ def test_2D_vectorized(fit_engine, with_errors): ff = Fitter(m2, m2) if fit_engine is not None: try: - ff.switch_engine(fit_engine) + ff.switch_minimizer(fit_engine) except AttributeError: pytest.skip(msg=f"{fit_engine} is not installed") try: @@ -241,7 +241,7 @@ def test_2D_vectorized(fit_engine, with_errors): @pytest.mark.parametrize("with_errors", [False, True]) -@pytest.mark.parametrize("fit_engine", [None, "lmfit", "bumps", "dfo_ls"]) +@pytest.mark.parametrize("fit_engine", [None, "lmfit", "bumps", "dfo"]) def test_2D_non_vectorized(fit_engine, with_errors): x = np.linspace(0, 5, 200) mm = AbsSin2DL(0.3, 1.6) @@ -253,7 +253,7 @@ def test_2D_non_vectorized(fit_engine, with_errors): ff = Fitter(m2, m2) if fit_engine is not None: try: - ff.switch_engine(fit_engine) + ff.switch_minimizer(fit_engine) except AttributeError: pytest.skip(msg=f"{fit_engine} is not installed") try: diff --git a/tests/integration_tests/Fitting/test_multi_fitter.py b/tests/integration_tests/Fitting/test_multi_fitter.py index 5ec8de9e..1d6af1ff 100644 --- a/tests/integration_tests/Fitting/test_multi_fitter.py +++ b/tests/integration_tests/Fitting/test_multi_fitter.py @@ -8,9 +8,9 @@ import pytest import numpy as np -from easyscience.Fitting.Constraints import ObjConstraint -from easyscience.Fitting.multi_fitter import MultiFitter -from easyscience.Fitting.minimizers import FitError +from easyscience.fitting.Constraints import ObjConstraint +from easyscience.fitting.multi_fitter import MultiFitter +from easyscience.fitting.minimizers import FitError from easyscience.Objects.ObjectClasses import BaseObj from easyscience.Objects.ObjectClasses import Parameter @@ -59,7 +59,7 @@ def __call__(self, x): @pytest.mark.parametrize("with_errors", [False, True]) -@pytest.mark.parametrize("fit_engine", [None, "lmfit", "bumps", "dfo_ls"]) +@pytest.mark.parametrize("fit_engine", [None, "lmfit", "bumps", "dfo"]) def test_multi_fit(fit_engine, with_errors): ref_sin_1 = AbsSin(0.2, np.pi) sp_sin_1 = AbsSin(0.354, 3.05) @@ -88,7 +88,7 @@ def test_multi_fit(fit_engine, with_errors): f = MultiFitter([sp_sin_1, sp_sin_2], [sp_sin_1, sp_sin_2]) if fit_engine is not None: try: - f.switch_engine(fit_engine) + f.switch_minimizer(fit_engine) except AttributeError: pytest.skip(msg=f"{fit_engine} is not installed") @@ -120,7 +120,7 @@ def 
test_multi_fit(fit_engine, with_errors): @pytest.mark.parametrize("with_errors", [False, True]) -@pytest.mark.parametrize("fit_engine", [None, "lmfit", "bumps", "dfo_ls"]) +@pytest.mark.parametrize("fit_engine", [None, "lmfit", "bumps", "dfo"]) def test_multi_fit2(fit_engine, with_errors): ref_sin_1 = AbsSin(0.2, np.pi) sp_sin_1 = AbsSin(0.354, 3.05) @@ -163,7 +163,7 @@ def test_multi_fit2(fit_engine, with_errors): f = MultiFitter([sp_sin_1, sp_line, sp_sin_2], [sp_sin_1, sp_line, sp_sin_2]) if fit_engine is not None: try: - f.switch_engine(fit_engine) + f.switch_minimizer(fit_engine) except AttributeError: pytest.skip(msg=f"{fit_engine} is not installed") @@ -198,7 +198,7 @@ def test_multi_fit2(fit_engine, with_errors): @pytest.mark.parametrize("with_errors", [False, True]) -@pytest.mark.parametrize("fit_engine", [None, "lmfit", "bumps", "dfo_ls"]) +@pytest.mark.parametrize("fit_engine", [None, "lmfit", "bumps", "dfo"]) def test_multi_fit_1D_2D(fit_engine, with_errors): # Generate fit and reference objects ref_sin1D = AbsSin(0.2, np.pi) @@ -232,7 +232,7 @@ def test_multi_fit_1D_2D(fit_engine, with_errors): ff = MultiFitter([sp_sin1D, sp_sin2D], [sp_sin1D, sp_sin2D]) if fit_engine is not None: try: - ff.switch_engine(fit_engine) + ff.switch_minimizer(fit_engine) except AttributeError: pytest.skip(msg=f"{fit_engine} is not installed") @@ -243,7 +243,7 @@ def test_multi_fit_1D_2D(fit_engine, with_errors): f = MultiFitter([sp_sin1D, sp_sin2D], [sp_sin1D, sp_sin2D]) if fit_engine is not None: try: - f.switch_engine(fit_engine) + f.switch_minimizer(fit_engine) except AttributeError: pytest.skip(msg=f"{fit_engine} is not installed") try: diff --git a/tests/integration_tests/test_undoRedo.py b/tests/integration_tests/test_undoRedo.py index 5db3a36a..fe8f0366 100644 --- a/tests/integration_tests/test_undoRedo.py +++ b/tests/integration_tests/test_undoRedo.py @@ -14,6 +14,7 @@ from easyscience.Objects.ObjectClasses import BaseObj from easyscience.Objects.Variable import Descriptor from easyscience.Objects.Variable import Parameter +from easyscience.fitting import Fitter def createSingleObjs(idx): @@ -229,7 +230,7 @@ def test_UndoRedoMacros(): assert item.raw_value == old_value + offset -@pytest.mark.parametrize("fit_engine", ["lmfit", "bumps", "dfo_ls"]) +@pytest.mark.parametrize("fit_engine", ["lmfit", "bumps", "dfo"]) def test_fittingUndoRedo(fit_engine): m_value = 6 c_value = 2 @@ -265,11 +266,9 @@ def __call__(self, x: np.ndarray) -> np.ndarray: y = l1(x) + 0.125 * (dy - 0.5) - from easyscience.Fitting import Fitter - f = Fitter(l2, l2) try: - f.switch_engine(fit_engine) + f.switch_minimizer(fit_engine) except AttributeError: pytest.skip(msg=f"{fit_engine} is not installed") diff --git a/tests/unit_tests/Fitting/minimizers/test_factory.py b/tests/unit_tests/Fitting/minimizers/test_factory.py new file mode 100644 index 00000000..64754925 --- /dev/null +++ b/tests/unit_tests/Fitting/minimizers/test_factory.py @@ -0,0 +1,60 @@ +from easyscience.fitting.minimizers.factory import factory +from easyscience.fitting.minimizers.factory import from_string_to_enum +from easyscience.fitting.minimizers.factory import AvailableMinimizers +from easyscience.fitting.minimizers import MinimizerBase +from unittest.mock import MagicMock +import pytest + +class TestFactory: + def pull_minminizer(self, minimizer: AvailableMinimizers) -> MinimizerBase: + mock_fit_object = MagicMock() + mock_fit_function = MagicMock() + minimizer = factory(minimizer, mock_fit_object, mock_fit_function) + return minimizer + + 
@pytest.mark.parametrize('minimizer_method,minimizer_enum', [('leastsq', AvailableMinimizers.LMFit), ('leastsq', AvailableMinimizers.LMFit_leastsq), ('powell', AvailableMinimizers.LMFit_powell), ('cobyla', AvailableMinimizers.LMFit_cobyla)]) + def test_factory_lm_fit(self, minimizer_method, minimizer_enum): + minimizer = self.pull_minminizer(minimizer_enum) + assert minimizer._method == minimizer_method + assert minimizer.wrapping == 'lmfit' + + @pytest.mark.parametrize('minimizer_method,minimizer_enum', [('amoeba', AvailableMinimizers.Bumps), ('amoeba', AvailableMinimizers.Bumps_simplex), ('newton', AvailableMinimizers.Bumps_newton), ('lm', AvailableMinimizers.Bumps_lm)]) + def test_factory_bumps_fit(self, minimizer_method, minimizer_enum): + minimizer = self.pull_minminizer(minimizer_enum) + assert minimizer._method == minimizer_method + assert minimizer.wrapping == 'bumps' + + @pytest.mark.parametrize('minimizer_method,minimizer_enum', [('leastsq', AvailableMinimizers.DFO), ('leastsq', AvailableMinimizers.DFO_leastsq)]) + def test_factory_dfo_fit(self, minimizer_method, minimizer_enum): + minimizer = self.pull_minminizer(minimizer_enum) + assert minimizer._method == minimizer_method + assert minimizer.wrapping == 'dfo' + + +@pytest.mark.parametrize('minimizer_name,expected', [('lmfit', AvailableMinimizers.LMFit), ('lmfit-leastsq', AvailableMinimizers.LMFit_leastsq), ('lmfit-powell', AvailableMinimizers.LMFit_powell), ('lmfit-cobyla', AvailableMinimizers.LMFit_cobyla), ]) +def test_from_string_to_enum_lmfit(minimizer_name, expected): + assert from_string_to_enum(minimizer_name) == expected + + +@pytest.mark.parametrize('minimizer_name,expected', [('bumps', AvailableMinimizers.Bumps), ('bumps-simplex', AvailableMinimizers.Bumps_simplex), ('bumps-newton', AvailableMinimizers.Bumps_newton), ('bumps-lm', AvailableMinimizers.Bumps_lm)]) +def test_from_string_to_enum_bumps(minimizer_name, expected): + assert from_string_to_enum(minimizer_name) == expected + + +@pytest.mark.parametrize('minimizer_name,expected', [('dfo', AvailableMinimizers.DFO), ('dfo-leastsq', AvailableMinimizers.DFO_leastsq)]) +def test_from_string_to_enum_dfo(minimizer_name, expected): + assert from_string_to_enum(minimizer_name) == expected + + +def test_available_minimizers(): + assert AvailableMinimizers.LMFit + assert AvailableMinimizers.LMFit_leastsq + assert AvailableMinimizers.LMFit_powell + assert AvailableMinimizers.LMFit_cobyla + assert AvailableMinimizers.Bumps + assert AvailableMinimizers.Bumps_simplex + assert AvailableMinimizers.Bumps_newton + assert AvailableMinimizers.Bumps_lm + assert AvailableMinimizers.DFO + assert AvailableMinimizers.DFO_leastsq + assert len(AvailableMinimizers) == 10 \ No newline at end of file diff --git a/tests/unit_tests/Fitting/test_constraints.py b/tests/unit_tests/Fitting/test_constraints.py index 60f37fa0..040b706a 100644 --- a/tests/unit_tests/Fitting/test_constraints.py +++ b/tests/unit_tests/Fitting/test_constraints.py @@ -10,8 +10,8 @@ import pytest -from easyscience.Fitting.Constraints import NumericConstraint -from easyscience.Fitting.Constraints import ObjConstraint +from easyscience.fitting.Constraints import NumericConstraint +from easyscience.fitting.Constraints import ObjConstraint from easyscience.Objects.Variable import Parameter diff --git a/tests/unit_tests/Fitting/test_fitter.py b/tests/unit_tests/Fitting/test_fitter.py new file mode 100644 index 00000000..4669400b --- /dev/null +++ b/tests/unit_tests/Fitting/test_fitter.py @@ -0,0 +1,251 @@ +from unittest.mock 
import MagicMock + +import pytest +import numpy as np +from easyscience.fitting.fitter import Fitter +import easyscience.fitting.fitter + + +class TestFitter(): + @pytest.fixture + def fitter(self, monkeypatch): + monkeypatch.setattr(Fitter, '_update_minimizer', MagicMock()) + self.mock_fit_object = MagicMock() + self.mock_fit_function = MagicMock() + return Fitter(self.mock_fit_object, self.mock_fit_function) + + def test_constructor(self, fitter: Fitter): + # When Then Expect + assert fitter._fit_object == self.mock_fit_object + assert fitter._fit_function == self.mock_fit_function + assert fitter._dependent_dims is None + assert fitter._name_current_minimizer == 'lmfit-leastsq' + fitter._update_minimizer.assert_called_once_with('lmfit-leastsq') + + def test_fit_constraints(self, fitter: Fitter): + # When + mock_minimizer = MagicMock() + mock_minimizer.fit_constraints = MagicMock(return_value='constraints') + fitter._minimizer = mock_minimizer + + # Then + constraints = fitter.fit_constraints() + + # Expect + assert constraints == 'constraints' + + def test_add_fit_constraint(self, fitter: Fitter): + # When + mock_minimizer = MagicMock() + mock_minimizer.add_fit_constraint = MagicMock() + fitter._minimizer = mock_minimizer + + # Then + fitter.add_fit_constraint('constraints') + + # Expect + mock_minimizer.add_fit_constraint.assert_called_once_with('constraints') + + def test_remove_fit_constraint(self, fitter: Fitter): + # When + mock_minimizer = MagicMock() + mock_minimizer.remove_fit_constraint = MagicMock() + fitter._minimizer = mock_minimizer + + # Then + fitter.remove_fit_constraint(10) + + # Expect + mock_minimizer.remove_fit_constraint.assert_called_once_with(10) + + def test_make_model(self, fitter: Fitter): + # When + mock_minimizer = MagicMock() + mock_minimizer.make_model = MagicMock(return_value='model') + fitter._minimizer = mock_minimizer + + # Then + model = fitter.make_model('pars') + + # Expect + assert model == 'model' + mock_minimizer.make_model.assert_called_once_with('pars') + + def test_evaluate(self, fitter: Fitter): + # When + mock_minimizer = MagicMock() + mock_minimizer.evaluate = MagicMock(return_value='result') + fitter._minimizer = mock_minimizer + + # Then + result = fitter.evaluate('pars') + + # Expect + assert result == 'result' + mock_minimizer.evaluate.assert_called_once_with('pars') + + def test_convert_to_pars_obj(self, fitter: Fitter): + # When + mock_minimizer = MagicMock() + mock_minimizer.convert_to_pars_obj = MagicMock(return_value='obj') + fitter._minimizer = mock_minimizer + + # Then + obj = fitter.convert_to_pars_obj('pars') + + # Expect + assert obj == 'obj' + mock_minimizer.convert_to_pars_obj.assert_called_once_with('pars') + + def test_initialize(self, fitter: Fitter): + # When + mock_fit_object = MagicMock() + mock_fit_function = MagicMock() + + # Then + fitter.initialize(mock_fit_object, mock_fit_function) + + # Expect + assert fitter._fit_object == mock_fit_object + assert fitter._fit_function == mock_fit_function + fitter._update_minimizer.count(2) + + def test_create(self, fitter: Fitter): + # When + fitter._update_minimizer = MagicMock() + + # Then + fitter.create('great-minimizer') + + # Expect + fitter._update_minimizer.assert_called_once_with('great-minimizer') + + def test_switch_minimizer(self, fitter: Fitter): + # When + mock_minimizer = MagicMock() + mock_minimizer.fit_constraints = MagicMock(return_value='constraints') + mock_minimizer.set_fit_constraint = MagicMock() + fitter._minimizer = mock_minimizer + + # Then + 
fitter.switch_minimizer('great-minimizer') + + # Expect + fitter._update_minimizer.count(2) + mock_minimizer.set_fit_constraint.assert_called_once_with('constraints') + mock_minimizer.fit_constraints.assert_called_once() + + def test_update_minimizer(self, monkeypatch): + # When + mock_fit_object = MagicMock() + mock_fit_function = MagicMock() + + mock_string_to_enum = MagicMock(return_value=10) + mock_factory = MagicMock(return_value='minimizer') + monkeypatch.setattr(easyscience.fitting.fitter, 'from_string_to_enum', mock_string_to_enum) + monkeypatch.setattr(easyscience.fitting.fitter, 'factory', mock_factory) + fitter = Fitter(mock_fit_object, mock_fit_function) + + # Then + fitter._update_minimizer('great-minimizer') + + # Expect + assert fitter._name_current_minimizer == 'great-minimizer' + assert fitter._minimizer == 'minimizer' + + def test_available_minimizers(self, fitter: Fitter): + # When + minimizers = fitter.available_minimizers + + # Then Expect + assert minimizers == [ + 'LMFit', 'LMFit_leastsq', 'LMFit_powell', 'LMFit_cobyla', + 'Bumps', 'Bumps_simplex', 'Bumps_newton', 'Bumps_lm', + 'DFO', 'DFO_leastsq' + ] + + def test_minimizer(self, fitter: Fitter): + # When + fitter._minimizer = 'minimizer' + + # Then + minimizer = fitter.minimizer + + # Expect + assert minimizer == 'minimizer' + + def test_fit_function(self, fitter: Fitter): + # When Then + fit_function = fitter.fit_function + + # Expect + assert fit_function == self.mock_fit_function + + def test_set_fit_function(self, fitter: Fitter): + # When + fitter._name_current_minimizer = 'current_minimizer' + + # Then + fitter.fit_function = 'new-fit-function' + + # Expect + assert fitter._fit_function == 'new-fit-function' + fitter._update_minimizer.assert_called_with('current_minimizer') + + def test_fit_object(self, fitter: Fitter): + # When Then + fit_object = fitter.fit_object + + # Expect + assert fit_object == self.mock_fit_object + + def test_set_fit_object(self, fitter: Fitter): + # When + fitter._name_current_minimizer = 'current_minimizer' + + # Then + fitter.fit_object = 'new-fit-object' + + # Expect + assert fitter.fit_object == 'new-fit-object' + fitter._update_minimizer.assert_called_with('current_minimizer') + + def test_fit(self, fitter: Fitter): + # When + fitter._precompute_reshaping = MagicMock(return_value=('x_fit', 'x_new', 'y_new', 'weights', 'dims')) + fitter._fit_function_wrapper = MagicMock(return_value='wrapped_fit_function') + fitter._post_compute_reshaping = MagicMock(return_value='fit_result') + fitter._minimizer = MagicMock() + fitter._minimizer.fit = MagicMock(return_value='result') + + # Then + result = fitter.fit('x', 'y', 'weights', 'vectorized') + + # Expect + fitter._precompute_reshaping.assert_called_once_with('x', 'y', 'weights', 'vectorized') + fitter._fit_function_wrapper.assert_called_once_with('x_new', flatten=True) + fitter._post_compute_reshaping.assert_called_once_with('result', 'x', 'y') + assert result == 'fit_result' + assert fitter._dependent_dims == 'dims' + assert fitter._fit_function == self.mock_fit_function + + def test_post_compute_reshaping(self, fitter: Fitter): + # When + fit_result = MagicMock() + fit_result.y_calc = np.array([[10], [20], [30]]) + fit_result.y_err = np.array([[40], [50], [60]]) + x = np.array([1, 2, 3]) + y = np.array([4, 5, 6]) + + # Then + result = fitter._post_compute_reshaping(fit_result, x, y) + + # Expect + assert np.array_equal(result.y_calc, np.array([10, 20, 30])) + assert np.array_equal(result.y_err, np.array([40, 50, 60])) + assert 
np.array_equal(result.x, x) + assert np.array_equal(result.y_obs, y) + +# TODO +# def test_fit_function_wrapper() +# def test_precompute_reshaping() diff --git a/tests/unit_tests/Objects/test_Groups.py b/tests/unit_tests/Objects/test_Groups.py index e0b9757c..5e483bee 100644 --- a/tests/unit_tests/Objects/test_Groups.py +++ b/tests/unit_tests/Objects/test_Groups.py @@ -394,7 +394,7 @@ def test_baseCollection_constraints(cls): p1 = Parameter("p1", 1) p2 = Parameter("p2", 2) - from easyscience.Fitting.Constraints import ObjConstraint + from easyscience.fitting.Constraints import ObjConstraint p2.user_constraints["testing"] = ObjConstraint(p2, "2*", p1)
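
For reference, below is a minimal usage sketch of the renamed API introduced by this patch: the `easyscience.fitting` module, `Fitter.switch_minimizer` (formerly `switch_engine`), `available_minimizers`, and `FitResults.minimizer_engine` (formerly `fitting_engine`). The straight-line model, its parameter names, and the sample data are illustrative only and are not part of the change set; the calls mirror those exercised in the updated examples and tests above.

    # Hedged sketch of the post-rename API; model/parameter names are hypothetical.
    import numpy as np

    from easyscience.fitting import Fitter                       # was easyscience.Fitting.Fitting.Fitter
    from easyscience.Objects.ObjectClasses import BaseObj, Parameter

    # Illustrative straight-line model y = m*x + c
    m = Parameter('m', 1.0)
    c = Parameter('c', 0.0)
    m.fixed = False
    c.fixed = False
    line = BaseObj('line', m=m, c=c)

    def fit_fun(x, *args, **kwargs):
        # Evaluate the model with the current parameter values
        return line.m.raw_value * x + line.c.raw_value

    f = Fitter(line, fit_fun)

    print(f.available_minimizers)    # names of the AvailableMinimizers enum members
    f.switch_minimizer('bumps')      # was f.switch_engine('bumps')

    x = np.array([1.0, 2.0, 3.0])
    y = np.array([2.0, 4.0, 6.0]) - 1.0
    result = f.fit(x, y)

    print(result.p)                  # fitted parameter values
    print(result.minimizer_engine)   # was result.fitting_engine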