Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

fix: Guard Minuit optimizer against provided strategy of None #2278

Merged
merged 9 commits into the base branch from the contributor's branch on
Aug 16, 2023
10 changes: 8 additions & 2 deletions src/pyhf/optimize/opt_minuit.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,10 @@ def __init__(self, *args, **kwargs):
Args:
errordef (:obj:`float`): See minuit docs. Default is ``1.0``.
steps (:obj:`int`): Number of steps for the bounds. Default is ``1000``.
strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is ``None``.
strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`.
Default is ``None``, which results in either
:attr:`iminuit.Minuit.strategy` ``0`` or ``1`` from the evaluation of
``int(not pyhf.tensorlib.default_do_grad)``.
Comment on lines +31 to +34
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

tolerance (:obj:`float`): Tolerance for termination.
See specific optimizer for detailed meaning.
Default is ``0.1``.
Expand Down Expand Up @@ -102,8 +105,11 @@ def _minimize(
# 0: Fast, user-provided gradient
# 1: Default, no user-provided gradient
strategy = options.pop(
'strategy', self.strategy if self.strategy is not None else not do_grad
'strategy', self.strategy if self.strategy is not None else int(not do_grad)
matthewfeickert marked this conversation as resolved.
Show resolved Hide resolved
)
# Passing strategy=None to options requires another check to guard against None
if strategy is None:
strategy = int(not do_grad)
matthewfeickert marked this conversation as resolved.
Show resolved Hide resolved
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

As iminuit.Minuit.strategy is an integer, instead of requiring a human to determine the integer representation of the inverse of the truthiness of do_grad during debugging or inspection, do this in code.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If you're aiming for maximum readability, I like strategy = 0 if do_grad else 1 best.

tolerance = options.pop('tolerance', self.tolerance)
if options:
raise exceptions.Unsupported(
Expand Down
22 changes: 15 additions & 7 deletions tests/test_optim.py
Original file line number Diff line number Diff line change
Expand Up @@ -173,27 +173,35 @@ def test_minuit_strategy_do_grad(mocker, backend):
assert spy.spy_return.minuit.strategy == 1


@pytest.mark.parametrize('strategy', [0, 1])
@pytest.mark.parametrize('strategy', [0, 1, 2])
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Adding 2 as we can and I forgot it existed.

def test_minuit_strategy_global(mocker, backend, strategy):
pyhf.set_backend(
pyhf.tensorlib, pyhf.optimize.minuit_optimizer(strategy=strategy, tolerance=0.2)
)
spy = mocker.spy(pyhf.optimize.minuit_optimizer, '_minimize')
m = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
data = pyhf.tensorlib.astensor([125.0] + m.config.auxdata)
model = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
data = pyhf.tensorlib.astensor([125.0] + model.config.auxdata)

pyhf.infer.mle.fit(data, m)
pyhf.infer.mle.fit(data, model)
assert spy.call_count == 1
assert spy.spy_return.minuit.strategy == strategy

pyhf.infer.mle.fit(data, m, strategy=0)
pyhf.infer.mle.fit(data, model, strategy=None)
assert spy.call_count == 2
assert spy.spy_return.minuit.strategy == 0
assert spy.spy_return.minuit.strategy == int(not pyhf.tensorlib.default_do_grad)

pyhf.infer.mle.fit(data, m, strategy=1)
pyhf.infer.mle.fit(data, model, strategy=0)
assert spy.call_count == 3
assert spy.spy_return.minuit.strategy == 0

pyhf.infer.mle.fit(data, model, strategy=1)
assert spy.call_count == 4
assert spy.spy_return.minuit.strategy == 1

pyhf.infer.mle.fit(data, model, strategy=2)
assert spy.call_count == 5
assert spy.spy_return.minuit.strategy == 2


def test_set_tolerance(backend):
m = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
Expand Down