diff --git a/src/easyscience/fitting/minimizers/minimizer_bumps.py b/src/easyscience/fitting/minimizers/minimizer_bumps.py index 7fdde0e8..4a3e8802 100644 --- a/src/easyscience/fitting/minimizers/minimizer_bumps.py +++ b/src/easyscience/fitting/minimizers/minimizer_bumps.py @@ -2,6 +2,7 @@ # SPDX-License-Identifier: BSD-3-Clause import copy +import warnings from typing import Callable from typing import List @@ -364,8 +365,8 @@ def _gen_fit_results( :return: fit results container :rtype: FitResults """ results = FitResults() for name, value in kwargs.items(): if getattr(results, name, False): setattr(results, name, value) @@ -375,6 +376,11 @@ if max_evaluations is not None and n_evaluations >= max_evaluations - 1: results.success = False results.message = f'Maximum number of evaluations ({max_evaluations}) reached' + warnings.warn( + f'Fit did not converge within the maximum number of evaluations ({max_evaluations}). ' + 'Consider increasing the maximum number of evaluations or adjusting the tolerance.', + UserWarning, + ) else: results.success = True results.message = 'Optimization terminated successfully' diff --git a/src/easyscience/fitting/minimizers/minimizer_dfo.py b/src/easyscience/fitting/minimizers/minimizer_dfo.py index 968dca71..259a52ea 100644 --- a/src/easyscience/fitting/minimizers/minimizer_dfo.py +++ b/src/easyscience/fitting/minimizers/minimizer_dfo.py @@ -361,6 +361,8 @@ def _gen_fit_results(self, fit_results, weights, **kwargs) -> FitResults: for name, value in kwargs.items(): if getattr(results, name, False): setattr(results, name, value) + # DFO-LS stores fixed exit-code constants on each result object; + # EXIT_SUCCESS is 0 and EXIT_MAXFUN_WARNING keeps a different flag value.
results.success = fit_results.flag == fit_results.EXIT_SUCCESS pars = {} @@ -417,6 +419,8 @@ def _dfo_fit( results = dfols.solve(model, pars_values, bounds=bounds, **kwargs) + # DFO-LS uses EXIT_MAXFUN_WARNING when it stops on the evaluation budget; + # we still return the partial fit result and let the unified result mark it as non-success. if results.flag in {results.EXIT_SUCCESS, results.EXIT_MAXFUN_WARNING}: return results diff --git a/src/easyscience/fitting/minimizers/minimizer_lmfit.py b/src/easyscience/fitting/minimizers/minimizer_lmfit.py index dbaea071..8bd95c63 100644 --- a/src/easyscience/fitting/minimizers/minimizer_lmfit.py +++ b/src/easyscience/fitting/minimizers/minimizer_lmfit.py @@ -1,6 +1,7 @@ # SPDX-FileCopyrightText: 2026 EasyScience contributors # SPDX-License-Identifier: BSD-3-Clause +import warnings from typing import Callable from typing import List @@ -341,6 +342,8 @@ def _gen_fit_results(self, fit_results: ModelResult, **kwargs) -> FitResults: results.y_err = 1 / fit_results.weights results.n_evaluations = fit_results.nfev results.message = fit_results.message + if fit_results.success is False and fit_results.message: + warnings.warn(str(fit_results.message), UserWarning) results.minimizer_engine = self.__class__ results.fit_args = None diff --git a/tests/unit/fitting/minimizers/test_minimizer_bumps.py b/tests/unit/fitting/minimizers/test_minimizer_bumps.py index 2515165d..19dc1579 100644 --- a/tests/unit/fitting/minimizers/test_minimizer_bumps.py +++ b/tests/unit/fitting/minimizers/test_minimizer_bumps.py @@ -340,6 +340,83 @@ def test_fit_progress_callback(self, minimizer: Bumps, monkeypatch) -> None: assert monitors[1]._callback is progress_callback assert monitors[1]._payload_builder == minimizer._build_progress_payload + def test_fit_uses_supplied_model_and_optional_kwargs( + self, minimizer: Bumps, monkeypatch + ) -> None: + from easyscience import global_object + + global_object.stack.enabled = False + + mock_driver_instance = 
MagicMock() + mock_driver_instance.fit.return_value = (np.array([3.0]), 0.5) + mock_driver_instance.stderr.return_value = np.array([0.1]) + mock_driver_instance.clip = MagicMock() + mock_FitDriver = MagicMock(return_value=mock_driver_instance) + monkeypatch.setattr( + easyscience.fitting.minimizers.minimizer_bumps, 'FitDriver', mock_FitDriver + ) + + mock_bumps_param = MagicMock() + mock_bumps_param.name = 'pmock_parm_1' + mock_problem = MagicMock() + mock_problem._parameters = [mock_bumps_param] + monkeypatch.setattr( + easyscience.fitting.minimizers.minimizer_bumps, + 'FitProblem', + MagicMock(return_value=mock_problem), + ) + + minimizer._make_model = MagicMock() + minimizer._gen_fit_results = MagicMock(return_value='gen_fit_results') + minimizer._resolve_fitclass = MagicMock(return_value=MagicMock(id='amoeba')) + minimizer._set_parameter_fit_result = MagicMock() + minimizer._cached_pars = {'mock_parm_1': MagicMock(value=1.0)} + minimizer._cached_pars_vals = {'mock_parm_1': (1.0, 0.0)} + + supplied_model = MagicMock() + minimizer_kwargs = {'existing_option': 'minimizer'} + engine_kwargs = {'engine_option': 'engine'} + + result = minimizer.fit( + x=np.array([1.0]), + y=np.array([2.0]), + weights=np.array([1.0]), + model=supplied_model, + tolerance=0.25, + max_evaluations=7, + minimizer_kwargs=minimizer_kwargs, + engine_kwargs=engine_kwargs, + ) + + assert result == 'gen_fit_results' + minimizer._make_model.assert_not_called() + fit_driver_kwargs = mock_FitDriver.call_args.kwargs + assert fit_driver_kwargs['problem'] is mock_problem + assert fit_driver_kwargs['existing_option'] == 'minimizer' + assert fit_driver_kwargs['engine_option'] == 'engine' + assert fit_driver_kwargs['ftol'] == 0.25 + assert fit_driver_kwargs['xtol'] == 0.25 + assert fit_driver_kwargs['steps'] == 7 + + def test_fit_rejects_non_callable_progress_callback( + self, minimizer: Bumps, monkeypatch + ) -> None: + monkeypatch.setattr( + easyscience.fitting.minimizers.minimizer_bumps, + 
'FitProblem', + MagicMock(return_value=MagicMock()), + ) + minimizer._resolve_fitclass = MagicMock(return_value=MagicMock(id='amoeba')) + + with pytest.raises(ValueError, match='progress_callback must be callable'): + minimizer.fit( + x=np.array([1.0]), + y=np.array([2.0]), + weights=np.array([1.0]), + model=MagicMock(), + progress_callback='not-callable', + ) + def test_build_progress_payload(self, minimizer: Bumps) -> None: # When mock_problem = MagicMock() @@ -429,6 +506,47 @@ def test_current_parameter_snapshot(self, minimizer: Bumps) -> None: # Expect assert snapshot == {'alpha': 1.5, 'beta': 2.5} + @pytest.mark.parametrize('par_list', [None, [MagicMock(unique_name='alpha')]]) + def test_convert_to_pars_obj_optional_parameter_list( + self, minimizer: Bumps, par_list, monkeypatch + ) -> None: + object_parameters = [MagicMock(unique_name='beta')] + minimizer._object = MagicMock() + minimizer._object.get_fit_parameters = MagicMock(return_value=object_parameters) + monkeypatch.setattr( + Bumps, + 'convert_to_par_object', + staticmethod(lambda parameter: parameter.unique_name), + ) + + converted = minimizer.convert_to_pars_obj(par_list) + + expected_parameters = object_parameters if par_list is None else par_list + assert converted == [parameter.unique_name for parameter in expected_parameters] + if par_list is None: + minimizer._object.get_fit_parameters.assert_called_once_with() + else: + minimizer._object.get_fit_parameters.assert_not_called() + + def test_make_model_without_parameters_uses_cached_parameters( + self, minimizer: Bumps, monkeypatch + ) -> None: + minimizer._generate_fit_function = MagicMock( + return_value=MagicMock(return_value=np.array([2.0])) + ) + minimizer._cached_pars = {'alpha': MagicMock(value=1.0)} + minimizer.convert_to_par_object = MagicMock(return_value='converted-alpha') + + mock_curve = MagicMock(return_value='curve') + monkeypatch.setattr(easyscience.fitting.minimizers.minimizer_bumps, 'Curve', mock_curve) + + model = 
minimizer._make_model() + curve = model(np.array([1.0]), np.array([2.0]), np.array([3.0])) + + assert curve == 'curve' + minimizer.convert_to_par_object.assert_called_once_with(minimizer._cached_pars['alpha']) + assert mock_curve.call_args.kwargs['palpha'] == 'converted-alpha' + def test_bumps_progress_monitor_calls_callback(self, minimizer: Bumps) -> None: # When callback = MagicMock(return_value=True) diff --git a/tests/unit/fitting/minimizers/test_minimizer_dfo.py b/tests/unit/fitting/minimizers/test_minimizer_dfo.py index 66be2849..d572f228 100644 --- a/tests/unit/fitting/minimizers/test_minimizer_dfo.py +++ b/tests/unit/fitting/minimizers/test_minimizer_dfo.py @@ -104,6 +104,68 @@ def test_fit_passes_callback_to_model_builder(self, minimizer: DFO) -> None: callback_on_improvement_only=False, ) + def test_fit_wraps_supplied_model_with_explicit_callback(self, minimizer: DFO) -> None: + from easyscience import global_object + + global_object.stack.enabled = False + + supplied_model = MagicMock() + wrapped_model = MagicMock() + explicit_callback = MagicMock() + + minimizer._make_model = MagicMock() + minimizer._wrap_model_with_callback = MagicMock(return_value=wrapped_model) + minimizer._get_callback_parameter_names = MagicMock(return_value=['palpha']) + minimizer._dfo_fit = MagicMock(return_value='fit') + minimizer._set_parameter_fit_result = MagicMock() + minimizer._gen_fit_results = MagicMock(return_value='gen_fit_results') + minimizer._cached_pars = {'alpha': MagicMock(value=1.0)} + + result = minimizer.fit( + x=np.array([1.0]), + y=np.array([2.0]), + weights=np.array([1.0]), + model=supplied_model, + callback=explicit_callback, + ) + + assert result == 'gen_fit_results' + minimizer._make_model.assert_not_called() + minimizer._wrap_model_with_callback.assert_called_once_with( + supplied_model, + ['palpha'], + explicit_callback, + 1, + False, + ) + minimizer._dfo_fit.assert_called_once_with(minimizer._cached_pars, wrapped_model) + + def 
test_fit_uses_supplied_model_without_callback(self, minimizer: DFO) -> None: + from easyscience import global_object + + global_object.stack.enabled = False + + supplied_model = MagicMock() + + minimizer._make_model = MagicMock() + minimizer._wrap_model_with_callback = MagicMock() + minimizer._dfo_fit = MagicMock(return_value='fit') + minimizer._set_parameter_fit_result = MagicMock() + minimizer._gen_fit_results = MagicMock(return_value='gen_fit_results') + minimizer._cached_pars = {'alpha': MagicMock(value=1.0)} + + result = minimizer.fit( + x=np.array([1.0]), + y=np.array([2.0]), + weights=np.array([1.0]), + model=supplied_model, + ) + + assert result == 'gen_fit_results' + minimizer._make_model.assert_not_called() + minimizer._wrap_model_with_callback.assert_not_called() + minimizer._dfo_fit.assert_called_once_with(minimizer._cached_pars, supplied_model) + def test_generate_fit_function(self, minimizer: DFO) -> None: # When minimizer._original_fit_function = MagicMock(return_value='fit_function_result') @@ -248,6 +310,22 @@ def test_make_model_callback_every(self, minimizer: DFO) -> None: assert state.evaluation == 2 assert all(state.xk == np.array([1222, 2333])) + def test_make_model_without_parameters_uses_cached_parameters(self, minimizer: DFO) -> None: + mock_fit_function = MagicMock(return_value=np.array([11.0])) + minimizer._generate_fit_function = MagicMock(return_value=mock_fit_function) + minimizer._cached_pars = {'alpha': MagicMock(value=1000.0)} + + model = minimizer._make_model() + residuals_for_model = model( + x=np.array([1.0]), + y=np.array([10.0]), + weights=np.array([0.5]), + ) + + residuals_for_model(np.array([1111.0])) + + assert mock_fit_function.call_args.kwargs == {'palpha': 1111.0} + @pytest.mark.parametrize('callback_every', [0, 1.3]) def test_fit_callback_every_must_be_positive(self, minimizer: DFO, callback_every) -> None: with pytest.raises(ValueError, match='callback_every must be a positive integer'): @@ -595,6 +673,50 @@ def 
test_progress_callback_not_used_when_explicit_callback_given(self, minimizer call_kwargs = minimizer._make_model.call_args[1] assert call_kwargs['callback'] is explicit_cb + @pytest.mark.parametrize( + ('parameters', 'expected_names'), + [ + ([MagicMock(unique_name='alpha')], ['palpha']), + (None, ['pbeta']), + ], + ) + def test_get_callback_parameter_names_optional_parameters( + self, minimizer: DFO, parameters, expected_names + ) -> None: + minimizer._cached_pars = {'beta': MagicMock(value=1.0)} + + parameter_names = minimizer._get_callback_parameter_names(parameters) + + assert parameter_names == expected_names + + def test_wrap_model_with_callback_improvement_only(self, minimizer: DFO) -> None: + callback = MagicMock() + wrapped_model = minimizer._wrap_model_with_callback( + lambda pars_values: np.asarray([pars_values[0] - 1.0]), + ['palpha'], + callback, + callback_every=1, + callback_on_improvement_only=True, + ) + + wrapped_model([0.5]) + + callback.assert_called_once() + assert callback.call_args.args[0].improved is True + + def test_prepare_kwargs_with_optional_arguments(self, minimizer: DFO) -> None: + kwargs = minimizer._prepare_kwargs(tolerance=0.05, max_evaluations=11, keep=True) + + assert kwargs == { + 'keep': True, + 'maxfun': 11, + 'rhoend': 0.05, + } + + def test_prepare_kwargs_rejects_large_tolerance(self, minimizer: DFO) -> None: + with pytest.raises(ValueError, match='Tolerance must be equal or smaller than 0.1'): + minimizer._prepare_kwargs(tolerance=0.2) + def test_make_progress_adapter_payload_format(self) -> None: """The adapter must produce the standard progress payload dict.""" progress_cb = MagicMock() diff --git a/tests/unit/fitting/minimizers/test_minimizer_lmfit.py b/tests/unit/fitting/minimizers/test_minimizer_lmfit.py index 6e43c08f..448cc7ea 100644 --- a/tests/unit/fitting/minimizers/test_minimizer_lmfit.py +++ b/tests/unit/fitting/minimizers/test_minimizer_lmfit.py @@ -1,6 +1,7 @@ # SPDX-FileCopyrightText: 2026 EasyScience 
contributors # SPDX-License-Identifier: BSD-3-Clause +import warnings from unittest.mock import MagicMock import numpy as np @@ -272,6 +273,45 @@ def test_fit_exception(self, minimizer: LMFit) -> None: with pytest.raises(FitError): minimizer.fit(x=1.0, y=2.0, weights=1) + def test_gen_fit_results_populates_evaluation_metadata(self, minimizer: LMFit) -> None: + fit_results = MagicMock() + fit_results.success = False + fit_results.data = 'data' + fit_results.userkws = {'x': 'x'} + fit_results.values = {'p1': 1.0} + fit_results.init_values = {'p1': 0.5} + fit_results.best_fit = 'best_fit' + fit_results.weights = 2 + fit_results.nfev = 9 + fit_results.message = 'max evaluations reached' + + with pytest.warns(UserWarning, match='max evaluations reached'): + result = minimizer._gen_fit_results(fit_results) + + assert result.success is False + assert result.n_evaluations == 9 + assert result.message == 'max evaluations reached' + assert result.engine_result == fit_results + + def test_gen_fit_results_success_does_not_warn(self, minimizer: LMFit) -> None: + fit_results = MagicMock() + fit_results.success = True + fit_results.data = 'data' + fit_results.userkws = {'x': 'x'} + fit_results.values = {'p1': 1.0} + fit_results.init_values = {'p1': 0.5} + fit_results.best_fit = 'best_fit' + fit_results.weights = 2 + fit_results.nfev = 3 + fit_results.message = 'success' + + with warnings.catch_warnings(record=True) as record: + warnings.simplefilter('always') + result = minimizer._gen_fit_results(fit_results) + + assert len(record) == 0 + assert result.success is True + def test_convert_to_pars_obj(self, minimizer: LMFit, monkeypatch) -> None: # When minimizer._object = MagicMock() diff --git a/tests/unit/fitting/minimizers/test_utils.py b/tests/unit/fitting/minimizers/test_utils.py new file mode 100644 index 00000000..f9227852 --- /dev/null +++ b/tests/unit/fitting/minimizers/test_utils.py @@ -0,0 +1,85 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# 
SPDX-License-Identifier: BSD-3-Clause + +import numpy as np +import pytest + +from easyscience.fitting.minimizers.utils import FitResults + + +class TestFitResultsRepr: + def _make_result(self, **overrides): + r = FitResults() + r.success = True + r.x = np.array([1.0, 2.0, 3.0]) + r.y_obs = np.array([1.0, 2.0, 3.0]) + r.y_calc = np.array([1.1, 1.9, 3.05]) + r.y_err = np.array([0.1, 0.1, 0.1]) + r.p = {'pa': 1.234, 'pb': 5.678} + r.n_evaluations = 42 + r.minimizer_engine = type('Bumps', (), {'__name__': 'Bumps'}) + for k, v in overrides.items(): + setattr(r, k, v) + return r + + def test_repr_contains_success(self): + r = self._make_result() + assert 'success=True' in repr(r) + + def test_repr_contains_n_pars_and_n_points(self): + r = self._make_result() + text = repr(r) + assert 'n_pars=2' in text + assert 'n_points=3' in text + + def test_repr_contains_chi2_values(self): + r = self._make_result() + text = repr(r) + assert 'chi2=' in text + assert 'reduced_chi2=' in text + assert 'N/A' not in text + + def test_repr_shows_na_when_chi2_cannot_be_computed(self): + r = self._make_result(y_err=np.array([0.0, 0.0, 0.0])) + text = repr(r) + assert 'chi2=N/A' in text + assert 'reduced_chi2=N/A' in text + + def test_repr_contains_n_evaluations(self): + r = self._make_result() + assert 'n_evaluations=42' in repr(r) + + def test_repr_contains_minimizer_name(self): + r = self._make_result() + assert 'minimizer=Bumps' in repr(r) + + def test_repr_minimizer_none(self): + r = self._make_result(minimizer_engine=None) + assert 'minimizer=None' in repr(r) + + def test_repr_includes_message_when_set(self): + r = self._make_result(message='Fit stopped: reached maximum evaluations (3)') + assert 'Fit stopped: reached maximum evaluations (3)' in repr(r) + + def test_repr_omits_message_when_empty(self): + r = self._make_result(message='') + assert 'message' not in repr(r) + + def test_repr_includes_parameters(self): + r = self._make_result() + text = repr(r) + assert 'pa=1.234' in text + 
assert 'pb=5.678' in text + + def test_repr_omits_parameters_when_empty(self): + r = self._make_result(p={}) + assert 'parameters' not in repr(r) + + def test_repr_default_fit_results(self): + r = FitResults() + text = repr(r) + assert 'success=False' in text + assert 'n_pars=0' in text + assert 'n_points=0' in text + assert 'n_evaluations=None' in text + assert 'chi2=N/A' in text