Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 7 additions & 1 deletion src/easyscience/fitting/minimizers/minimizer_bumps.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
# SPDX-License-Identifier: BSD-3-Clause

import copy
import warnings
from typing import Callable
from typing import List

Expand Down Expand Up @@ -364,8 +365,8 @@ def _gen_fit_results(
:return: fit results container
:rtype: FitResults
"""

results = FitResults()

for name, value in kwargs.items():
if getattr(results, name, False):
setattr(results, name, value)
Expand All @@ -375,6 +376,11 @@ def _gen_fit_results(
if max_evaluations is not None and n_evaluations >= max_evaluations - 1:
results.success = False
results.message = f'Maximum number of evaluations ({max_evaluations}) reached'
warnings.warn(
f'Fit did not converge within the maximum number of evaluations ({max_evaluations}). '
'Consider increasing the maximum number of evaluations or adjusting the tolerance.',
UserWarning,
)
else:
results.success = True
results.message = 'Optimization terminated successfully'
Expand Down
4 changes: 4 additions & 0 deletions src/easyscience/fitting/minimizers/minimizer_dfo.py
Original file line number Diff line number Diff line change
Expand Up @@ -361,6 +361,8 @@ def _gen_fit_results(self, fit_results, weights, **kwargs) -> FitResults:
for name, value in kwargs.items():
if getattr(results, name, False):
setattr(results, name, value)
# DFO-LS stores fixed exit-code constants on each result object;
# EXIT_SUCCESS is 0 and EXIT_MAXFUN_WARNING keeps a different flag value.
results.success = fit_results.flag == fit_results.EXIT_SUCCESS

pars = {}
Expand Down Expand Up @@ -417,6 +419,8 @@ def _dfo_fit(

results = dfols.solve(model, pars_values, bounds=bounds, **kwargs)

# DFO-LS uses EXIT_MAXFUN_WARNING when it stops on the evaluation budget;
# we still return the partial fit result and let the unified result mark it as non-success.
if results.flag in {results.EXIT_SUCCESS, results.EXIT_MAXFUN_WARNING}:
return results

Expand Down
3 changes: 3 additions & 0 deletions src/easyscience/fitting/minimizers/minimizer_lmfit.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
# SPDX-FileCopyrightText: 2026 EasyScience contributors <https://github.com/easyscience>
# SPDX-License-Identifier: BSD-3-Clause

import warnings
from typing import Callable
from typing import List

Expand Down Expand Up @@ -341,6 +342,8 @@ def _gen_fit_results(self, fit_results: ModelResult, **kwargs) -> FitResults:
results.y_err = 1 / fit_results.weights
results.n_evaluations = fit_results.nfev
results.message = fit_results.message
if fit_results.success is False and fit_results.message:
warnings.warn(str(fit_results.message), UserWarning)
results.minimizer_engine = self.__class__
results.fit_args = None

Expand Down
118 changes: 118 additions & 0 deletions tests/unit/fitting/minimizers/test_minimizer_bumps.py
Original file line number Diff line number Diff line change
Expand Up @@ -340,6 +340,83 @@ def test_fit_progress_callback(self, minimizer: Bumps, monkeypatch) -> None:
assert monitors[1]._callback is progress_callback
assert monitors[1]._payload_builder == minimizer._build_progress_payload

def test_fit_uses_supplied_model_and_optional_kwargs(
    self, minimizer: Bumps, monkeypatch
) -> None:
    """fit() must use a caller-supplied model and forward optional kwargs to FitDriver."""
    from easyscience import global_object

    # Disable the global undo stack for the duration of the test.
    global_object.stack.enabled = False

    # Stub the bumps FitDriver: fit() returns (best values, chisq), stderr() uncertainties.
    mock_driver_instance = MagicMock()
    mock_driver_instance.fit.return_value = (np.array([3.0]), 0.5)
    mock_driver_instance.stderr.return_value = np.array([0.1])
    mock_driver_instance.clip = MagicMock()
    mock_FitDriver = MagicMock(return_value=mock_driver_instance)
    monkeypatch.setattr(
        easyscience.fitting.minimizers.minimizer_bumps, 'FitDriver', mock_FitDriver
    )

    # FitProblem yields a problem whose parameter name mirrors the cached parameter key.
    mock_bumps_param = MagicMock()
    mock_bumps_param.name = 'pmock_parm_1'
    mock_problem = MagicMock()
    mock_problem._parameters = [mock_bumps_param]
    monkeypatch.setattr(
        easyscience.fitting.minimizers.minimizer_bumps,
        'FitProblem',
        MagicMock(return_value=mock_problem),
    )

    # Stub the minimizer internals so only fit()'s orchestration is exercised.
    minimizer._make_model = MagicMock()
    minimizer._gen_fit_results = MagicMock(return_value='gen_fit_results')
    minimizer._resolve_fitclass = MagicMock(return_value=MagicMock(id='amoeba'))
    minimizer._set_parameter_fit_result = MagicMock()
    minimizer._cached_pars = {'mock_parm_1': MagicMock(value=1.0)}
    minimizer._cached_pars_vals = {'mock_parm_1': (1.0, 0.0)}

    supplied_model = MagicMock()
    minimizer_kwargs = {'existing_option': 'minimizer'}
    engine_kwargs = {'engine_option': 'engine'}

    result = minimizer.fit(
        x=np.array([1.0]),
        y=np.array([2.0]),
        weights=np.array([1.0]),
        model=supplied_model,
        tolerance=0.25,
        max_evaluations=7,
        minimizer_kwargs=minimizer_kwargs,
        engine_kwargs=engine_kwargs,
    )

    assert result == 'gen_fit_results'
    # A supplied model means _make_model must not be invoked.
    minimizer._make_model.assert_not_called()
    # tolerance maps onto both ftol and xtol, max_evaluations onto steps,
    # and both kwargs dicts are merged into the FitDriver call.
    fit_driver_kwargs = mock_FitDriver.call_args.kwargs
    assert fit_driver_kwargs['problem'] is mock_problem
    assert fit_driver_kwargs['existing_option'] == 'minimizer'
    assert fit_driver_kwargs['engine_option'] == 'engine'
    assert fit_driver_kwargs['ftol'] == 0.25
    assert fit_driver_kwargs['xtol'] == 0.25
    assert fit_driver_kwargs['steps'] == 7

def test_fit_rejects_non_callable_progress_callback(
    self, minimizer: Bumps, monkeypatch
) -> None:
    """fit() must raise ValueError when progress_callback is not callable."""
    fake_problem = MagicMock()
    monkeypatch.setattr(
        easyscience.fitting.minimizers.minimizer_bumps,
        'FitProblem',
        MagicMock(return_value=fake_problem),
    )
    minimizer._resolve_fitclass = MagicMock(return_value=MagicMock(id='amoeba'))

    with pytest.raises(ValueError, match='progress_callback must be callable'):
        minimizer.fit(
            x=np.array([1.0]),
            y=np.array([2.0]),
            weights=np.array([1.0]),
            model=MagicMock(),
            progress_callback='not-callable',
        )

def test_build_progress_payload(self, minimizer: Bumps) -> None:
# When
mock_problem = MagicMock()
Expand Down Expand Up @@ -429,6 +506,47 @@ def test_current_parameter_snapshot(self, minimizer: Bumps) -> None:
# Expect
assert snapshot == {'alpha': 1.5, 'beta': 2.5}

@pytest.mark.parametrize('par_list', [None, [MagicMock(unique_name='alpha')]])
def test_convert_to_pars_obj_optional_parameter_list(
    self, minimizer: Bumps, par_list, monkeypatch
) -> None:
    """Without an explicit list, conversion falls back to the object's fit parameters."""
    fallback_parameters = [MagicMock(unique_name='beta')]
    minimizer._object = MagicMock()
    minimizer._object.get_fit_parameters = MagicMock(return_value=fallback_parameters)
    # Identity-style converter: each parameter maps to its unique name.
    monkeypatch.setattr(
        Bumps,
        'convert_to_par_object',
        staticmethod(lambda parameter: parameter.unique_name),
    )

    converted = minimizer.convert_to_pars_obj(par_list)

    source = fallback_parameters if par_list is None else par_list
    assert converted == [item.unique_name for item in source]
    if par_list is None:
        minimizer._object.get_fit_parameters.assert_called_once_with()
    else:
        minimizer._object.get_fit_parameters.assert_not_called()

def test_make_model_without_parameters_uses_cached_parameters(
    self, minimizer: Bumps, monkeypatch
) -> None:
    """_make_model() with no explicit parameters converts the cached ones for Curve."""
    # Fit-function stub returned by the generator; the model wraps it into a bumps Curve.
    minimizer._generate_fit_function = MagicMock(
        return_value=MagicMock(return_value=np.array([2.0]))
    )
    minimizer._cached_pars = {'alpha': MagicMock(value=1.0)}
    minimizer.convert_to_par_object = MagicMock(return_value='converted-alpha')

    mock_curve = MagicMock(return_value='curve')
    monkeypatch.setattr(easyscience.fitting.minimizers.minimizer_bumps, 'Curve', mock_curve)

    model = minimizer._make_model()
    curve = model(np.array([1.0]), np.array([2.0]), np.array([3.0]))

    assert curve == 'curve'
    # The cached parameter is converted exactly once and handed to Curve under
    # the 'p'-prefixed keyword name.
    minimizer.convert_to_par_object.assert_called_once_with(minimizer._cached_pars['alpha'])
    assert mock_curve.call_args.kwargs['palpha'] == 'converted-alpha'

def test_bumps_progress_monitor_calls_callback(self, minimizer: Bumps) -> None:
# When
callback = MagicMock(return_value=True)
Expand Down
122 changes: 122 additions & 0 deletions tests/unit/fitting/minimizers/test_minimizer_dfo.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,6 +104,68 @@ def test_fit_passes_callback_to_model_builder(self, minimizer: DFO) -> None:
callback_on_improvement_only=False,
)

def test_fit_wraps_supplied_model_with_explicit_callback(self, minimizer: DFO) -> None:
    """A supplied model plus an explicit callback must be wrapped before _dfo_fit."""
    from easyscience import global_object

    # Disable the global undo stack for the duration of the test.
    global_object.stack.enabled = False

    supplied_model = MagicMock()
    wrapped_model = MagicMock()
    explicit_callback = MagicMock()

    # Stub the internals so only fit()'s wiring is under test.
    minimizer._make_model = MagicMock()
    minimizer._wrap_model_with_callback = MagicMock(return_value=wrapped_model)
    minimizer._get_callback_parameter_names = MagicMock(return_value=['palpha'])
    minimizer._dfo_fit = MagicMock(return_value='fit')
    minimizer._set_parameter_fit_result = MagicMock()
    minimizer._gen_fit_results = MagicMock(return_value='gen_fit_results')
    minimizer._cached_pars = {'alpha': MagicMock(value=1.0)}

    result = minimizer.fit(
        x=np.array([1.0]),
        y=np.array([2.0]),
        weights=np.array([1.0]),
        model=supplied_model,
        callback=explicit_callback,
    )

    assert result == 'gen_fit_results'
    # A supplied model means _make_model must not be invoked.
    minimizer._make_model.assert_not_called()
    # Wrapping receives the callback parameter names plus the default cadence
    # settings (callback_every=1, callback_on_improvement_only=False).
    minimizer._wrap_model_with_callback.assert_called_once_with(
        supplied_model,
        ['palpha'],
        explicit_callback,
        1,
        False,
    )
    # The wrapped model, not the raw one, is handed to the DFO-LS driver.
    minimizer._dfo_fit.assert_called_once_with(minimizer._cached_pars, wrapped_model)

def test_fit_uses_supplied_model_without_callback(self, minimizer: DFO) -> None:
    """When a model is supplied and no callback is given, the model is used unwrapped."""
    from easyscience import global_object

    global_object.stack.enabled = False

    user_model = MagicMock()

    # Only fit()'s routing is under test; stub everything it delegates to.
    minimizer._make_model = MagicMock()
    minimizer._wrap_model_with_callback = MagicMock()
    minimizer._dfo_fit = MagicMock(return_value='fit')
    minimizer._set_parameter_fit_result = MagicMock()
    minimizer._gen_fit_results = MagicMock(return_value='gen_fit_results')
    minimizer._cached_pars = {'alpha': MagicMock(value=1.0)}

    outcome = minimizer.fit(
        x=np.array([1.0]),
        y=np.array([2.0]),
        weights=np.array([1.0]),
        model=user_model,
    )

    assert outcome == 'gen_fit_results'
    minimizer._make_model.assert_not_called()
    minimizer._wrap_model_with_callback.assert_not_called()
    minimizer._dfo_fit.assert_called_once_with(minimizer._cached_pars, user_model)

def test_generate_fit_function(self, minimizer: DFO) -> None:
# When
minimizer._original_fit_function = MagicMock(return_value='fit_function_result')
Expand Down Expand Up @@ -248,6 +310,22 @@ def test_make_model_callback_every(self, minimizer: DFO) -> None:
assert state.evaluation == 2
assert all(state.xk == np.array([1222, 2333]))

def test_make_model_without_parameters_uses_cached_parameters(self, minimizer: DFO) -> None:
    """_make_model() with no explicit parameters pulls names from the cached ones."""
    fit_function = MagicMock(return_value=np.array([11.0]))
    minimizer._generate_fit_function = MagicMock(return_value=fit_function)
    minimizer._cached_pars = {'alpha': MagicMock(value=1000.0)}

    model = minimizer._make_model()
    residuals = model(x=np.array([1.0]), y=np.array([10.0]), weights=np.array([0.5]))
    residuals(np.array([1111.0]))

    # The trial value must reach the fit function under the 'p'-prefixed keyword.
    assert fit_function.call_args.kwargs == {'palpha': 1111.0}

@pytest.mark.parametrize('callback_every', [0, 1.3])
def test_fit_callback_every_must_be_positive(self, minimizer: DFO, callback_every) -> None:
with pytest.raises(ValueError, match='callback_every must be a positive integer'):
Expand Down Expand Up @@ -595,6 +673,50 @@ def test_progress_callback_not_used_when_explicit_callback_given(self, minimizer
call_kwargs = minimizer._make_model.call_args[1]
assert call_kwargs['callback'] is explicit_cb

@pytest.mark.parametrize(
    ('parameters', 'expected_names'),
    [
        # Explicit parameters: names derive from the supplied list.
        ([MagicMock(unique_name='alpha')], ['palpha']),
        # No parameters: names fall back to the cached parameter keys.
        (None, ['pbeta']),
    ],
)
def test_get_callback_parameter_names_optional_parameters(
    self, minimizer: DFO, parameters, expected_names
) -> None:
    """_get_callback_parameter_names falls back to cached parameters when none are given."""
    minimizer._cached_pars = {'beta': MagicMock(value=1.0)}

    parameter_names = minimizer._get_callback_parameter_names(parameters)

    # Names carry the 'p' prefix used for fit-function keyword arguments.
    assert parameter_names == expected_names

def test_wrap_model_with_callback_improvement_only(self, minimizer: DFO) -> None:
    """With callback_on_improvement_only=True the first evaluation counts as improved."""
    progress_cb = MagicMock()

    def model(pars_values):
        return np.asarray([pars_values[0] - 1.0])

    wrapped = minimizer._wrap_model_with_callback(
        model,
        ['palpha'],
        progress_cb,
        callback_every=1,
        callback_on_improvement_only=True,
    )

    wrapped([0.5])

    progress_cb.assert_called_once()
    state = progress_cb.call_args.args[0]
    assert state.improved is True

def test_prepare_kwargs_with_optional_arguments(self, minimizer: DFO) -> None:
    """tolerance and max_evaluations map onto rhoend and maxfun; extras pass through."""
    prepared = minimizer._prepare_kwargs(tolerance=0.05, max_evaluations=11, keep=True)

    expected = {'keep': True, 'maxfun': 11, 'rhoend': 0.05}
    assert prepared == expected

def test_prepare_kwargs_rejects_large_tolerance(self, minimizer: DFO) -> None:
    """A tolerance above the accepted 0.1 limit must raise ValueError."""
    with pytest.raises(ValueError, match='Tolerance must be equal or smaller than 0.1'):
        minimizer._prepare_kwargs(tolerance=0.2)

def test_make_progress_adapter_payload_format(self) -> None:
"""The adapter must produce the standard progress payload dict."""
progress_cb = MagicMock()
Expand Down
40 changes: 40 additions & 0 deletions tests/unit/fitting/minimizers/test_minimizer_lmfit.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
# SPDX-FileCopyrightText: 2026 EasyScience contributors <https://github.com/easyscience>
# SPDX-License-Identifier: BSD-3-Clause

import warnings
from unittest.mock import MagicMock

import numpy as np
Expand Down Expand Up @@ -272,6 +273,45 @@ def test_fit_exception(self, minimizer: LMFit) -> None:
with pytest.raises(FitError):
minimizer.fit(x=1.0, y=2.0, weights=1)

def test_gen_fit_results_populates_evaluation_metadata(self, minimizer: LMFit) -> None:
    """A failed lmfit result propagates its metadata and emits a UserWarning."""
    engine_result = MagicMock()
    engine_result.success = False
    engine_result.data = 'data'
    engine_result.userkws = {'x': 'x'}
    engine_result.values = {'p1': 1.0}
    engine_result.init_values = {'p1': 0.5}
    engine_result.best_fit = 'best_fit'
    engine_result.weights = 2
    engine_result.nfev = 9
    engine_result.message = 'max evaluations reached'

    # The failure message must be surfaced to the user as a warning.
    with pytest.warns(UserWarning, match='max evaluations reached'):
        outcome = minimizer._gen_fit_results(engine_result)

    assert outcome.success is False
    assert outcome.n_evaluations == 9
    assert outcome.message == 'max evaluations reached'
    assert outcome.engine_result == engine_result

def test_gen_fit_results_success_does_not_warn(self, minimizer: LMFit) -> None:
    """A successful lmfit result must not emit any warning."""
    fit_results = MagicMock()
    fit_results.success = True
    fit_results.data = 'data'
    fit_results.userkws = {'x': 'x'}
    fit_results.values = {'p1': 1.0}
    fit_results.init_values = {'p1': 0.5}
    fit_results.best_fit = 'best_fit'
    fit_results.weights = 2
    fit_results.nfev = 3
    fit_results.message = 'success'

    # Record every warning; on success the non-convergence warning path must stay silent.
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter('always')
        result = minimizer._gen_fit_results(fit_results)

    assert len(record) == 0
    assert result.success is True

def test_convert_to_pars_obj(self, minimizer: LMFit, monkeypatch) -> None:
# When
minimizer._object = MagicMock()
Expand Down
Loading
Loading