build: Update lower bound of pyhf to v0.7.0 (#367)
* Update lower bound of pyhf to v0.7.0
* Update model.config.par_names() to model.config.par_names due to pyhf API change
matthewfeickert committed Sep 25, 2022
1 parent 278d088 commit 6f15371
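
The substantive change is the pyhf v0.7.0 API: model.config.par_names went from a method to a property. A minimal sketch of the new access pattern (the demo model from pyhf.simplemodels is an illustration, not part of this commit):

    import pyhf

    # a simple demo model, purely for illustration
    model = pyhf.simplemodels.uncorrelated_background(
        signal=[5.0], bkg=[50.0], bkg_uncertainty=[7.0]
    )

    # pyhf < 0.7.0: par_names was a method
    # labels = model.config.par_names()

    # pyhf >= 0.7.0: par_names is a property, no call parentheses
    labels = model.config.par_names
    print(labels)  # e.g. ['mu', 'uncorr_bkguncrt[0]']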
Showing 4 changed files with 13 additions and 13 deletions.
2 changes: 1 addition & 1 deletion setup.cfg
@@ -23,7 +23,7 @@ packages = find:
package_dir = =src
python_requires = >=3.7
install_requires =
-pyhf[minuit]==0.7.0rc4 # model.config.suggested_fixed API change, staterror fix, set_poi(None)
+pyhf[minuit]~=0.7.0 # model.config.suggested_fixed / .par_names API changes, set_poi(None)
boost_histogram>=1.0.0 # subclassing with family, 1.02 for stdev scaling fix (currently not needed)
awkward>=1.8 # _v2 API in submodule
tabulate>=0.8.1 # multiline text
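For context on the new pin: ~=0.7.0 is the compatible-release specifier, equivalent to >=0.7.0, <0.8.0, so cabinetry picks up 0.7.x bugfix releases while excluding a future 0.8 API line. A quick check with the packaging library (a hypothetical snippet, not part of this diff):

    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet("~=0.7.0")  # same as ">=0.7.0, <0.8.0"
    print("0.7.1" in spec)     # True: bugfix releases remain compatible
    print("0.8.0" in spec)     # False: next minor release is excluded
    print("0.7.0rc4" in spec)  # False: pre-releases are excluded by default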
12 changes: 6 additions & 6 deletions src/cabinetry/fit/__init__.py
@@ -109,7 +109,7 @@ def _fit_model_pyhf(
uncertainty = np.where(
result_obj.minuit.fixed, 0.0, pyhf.tensorlib.to_numpy(result[:, 1])
)
-labels = model.config.par_names()
+labels = model.config.par_names
corr_mat = pyhf.tensorlib.to_numpy(corr_mat)
best_twice_nll = float(best_twice_nll) # convert 0-dim np.ndarray to float

@@ -183,7 +183,7 @@ def _fit_model_custom(
fix_pars = fix_pars or model.config.suggested_fixed()
par_bounds = par_bounds or model.config.suggested_bounds()

-labels = model.config.par_names()
+labels = model.config.par_names

def twice_nll_func(pars: np.ndarray) -> Any:
"""The objective for minimization: twice the negative log-likelihood.
@@ -560,7 +560,7 @@ def ranking(
custom_fit=custom_fit,
)

-labels = model.config.par_names()
+labels = model.config.par_names
prefit_unc = model_utils.prefit_uncertainties(model)

# use POI given by kwarg, fall back to POI specified in model
@@ -694,7 +694,7 @@ def scan(
ScanResults: includes parameter name, scanned values and 2*log(likelihood)
offset
"""
-labels = model.config.par_names()
+labels = model.config.par_names

# get index of parameter with name par_name
par_index = model_utils._parameter_index(par_name, labels)
@@ -837,7 +837,7 @@ def limit(
# set POI name in model config to desired value, hypotest will pick this up
# save original value to reset model later
original_model_poi_name = model.config.poi_name
-model.config.set_poi(model.config.par_names()[poi_index])
+model.config.set_poi(model.config.par_names[poi_index])

# show two decimals only if confidence level in percent is not an integer
cl_label = (
@@ -1091,7 +1091,7 @@ def significance(
# set POI name in model config to desired value, hypotest will pick this up
# save original value to reset model later
original_model_poi_name = model.config.poi_name
-model.config.set_poi(model.config.par_names()[poi_index])
+model.config.set_poi(model.config.par_names[poi_index])

log.info(f"calculating discovery significance for {model.config.poi_name}")
obs_p_val, exp_p_val = pyhf.infer.hypotest(
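The limit() and significance() hunks both index into the par_names property to temporarily repoint the POI. A minimal sketch of that pattern (again using the illustrative demo model, not cabinetry's own code):

    import pyhf

    model = pyhf.simplemodels.uncorrelated_background(
        signal=[5.0], bkg=[50.0], bkg_uncertainty=[7.0]
    )

    poi_index = 0
    original_model_poi_name = model.config.poi_name  # save to reset the model later
    model.config.set_poi(model.config.par_names[poi_index])  # property indexing, no ()
    # ... run pyhf.infer.hypotest with the temporary POI ...
    model.config.set_poi(original_model_poi_name)  # restore the original POI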
6 changes: 3 additions & 3 deletions src/cabinetry/model_utils.py
@@ -425,7 +425,7 @@ def prediction(
ModelPrediction: model, yields and uncertainties per bin and channel
"""
if fit_results is not None:
-if fit_results.labels != model.config.par_names():
+if fit_results.labels != model.config.par_names:
log.warning("parameter names in fit results and model do not match")
# fit results specified, so they are used
param_values = fit_results.bestfit
@@ -531,7 +531,7 @@ def _poi_index(
"""
if poi_name is not None:
# use POI given by kwarg if specified
-poi_index = _parameter_index(poi_name, model.config.par_names())
+poi_index = _parameter_index(poi_name, model.config.par_names)
if poi_index is None:
raise ValueError(f"parameter {poi_name} not found in model")
elif model.config.poi_index is not None:
@@ -642,7 +642,7 @@ def match_fit_results(model: pyhf.pdf.Model, fit_results: FitResults) -> FitResults:

bestfit = asimov_parameters(model) # Asimov parameter values for target model
uncertainty = prefit_uncertainties(model) # pre-fit uncertainties for target model
-labels = model.config.par_names() # labels for target model
+labels = model.config.par_names # labels for target model

# indices of parameters in current fit results, or None if they are missing
indices_for_corr: List[Optional[int]] = [None] * len(labels)
6 changes: 3 additions & 3 deletions tests/test_model_utils.py
@@ -470,7 +470,7 @@ def test_match_fit_results(mock_pars, mock_uncs):
)

# remove par_a, flip par_b and par_c, add par_d
-mock_model.config.par_names.return_value = ["par_c", "par_d", "par_b"]
+mock_model.config.par_names = ["par_c", "par_d", "par_b"]
matched_fit_res = model_utils.match_fit_results(mock_model, fit_results)
assert mock_pars.call_args_list == [((mock_model,), {})]
assert mock_uncs.call_args_list == [((mock_model,), {})]
@@ -484,7 +484,7 @@ def test_match_fit_results(mock_pars, mock_uncs):
assert matched_fit_res.goodness_of_fit == 0.1

# all parameters are new
-mock_model.config.par_names.return_value = ["par_d", "par_e"]
+mock_model.config.par_names = ["par_d", "par_e"]
matched_fit_res = model_utils.match_fit_results(mock_model, fit_results)
assert np.allclose(matched_fit_res.bestfit, [4.0, 5.0])
assert np.allclose(matched_fit_res.uncertainty, [0.4, 0.5])
@@ -494,7 +494,7 @@ def test_match_fit_results(mock_pars, mock_uncs):
assert matched_fit_res.goodness_of_fit == 0.1

# fit results already match model exactly
-mock_model.config.par_names.return_value = ["par_a", "par_b", "par_c"]
+mock_model.config.par_names = ["par_a", "par_b", "par_c"]
matched_fit_res = model_utils.match_fit_results(mock_model, fit_results)
assert np.allclose(matched_fit_res.bestfit, [1.0, 2.0, 3.0])
assert np.allclose(matched_fit_res.uncertainty, [0.1, 0.2, 0.3])
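Because par_names is now a plain attribute rather than a method, the tests assign to it directly instead of configuring return_value. A minimal sketch of the difference with unittest.mock (a standalone illustration, not part of the diff):

    from unittest import mock

    mock_model = mock.MagicMock()

    # pyhf < 0.7.0: par_names() was a method, so tests configured its return value
    mock_model.config.par_names.return_value = ["par_a", "par_b"]

    # pyhf >= 0.7.0: par_names is a plain attribute, so tests set it directly
    mock_model.config.par_names = ["par_a", "par_b"]
    assert mock_model.config.par_names == ["par_a", "par_b"]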
