Finish 0.20.3
jonas-eschle committed Apr 20, 2024
2 parents f286220 + 57c3ae2 commit aa5aa8f
Showing 7 changed files with 127 additions and 22 deletions.
13 changes: 13 additions & 0 deletions CHANGELOG.rst
@@ -10,6 +10,7 @@ Develop
Major Features and Improvements
-------------------------------


Breaking changes
------------------

@@ -29,6 +30,18 @@ Requirement changes
Thanks
------

0.20.3 (19 Apr 2024)
========================

Bug fixes and small changes
---------------------------
- consistent behavior in losses: a ``SimpleLoss`` can now take a ``gradient`` and a ``hessian`` function, and the base loss provides fallbacks that dispatch correctly between ``value_gradient`` and ``gradient``. This mainly matters if you have implemented a custom loss and should fix any issues with it (a short sketch follows this list).
- multiprocessing could get stuck due to an `upstream bug in TensorFlow <https://github.com/tensorflow/tensorflow/issues/66115>`_. This is worked around by disabling an unused piece of code.
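
A minimal sketch of the new ``SimpleLoss`` options (illustrative only: the toy loss, the parameter values, and the ``errordef`` choice are assumptions, not part of this commit):

.. code-block:: python

    import zfit
    import zfit.z.numpy as znp

    param1 = zfit.Parameter("param1", 1.0)
    param2 = zfit.Parameter("param2", 2.0)

    def squared_loss(params):
        # toy chi-squared-like loss: sum of squared parameter values
        return znp.sum(znp.stack([p**2 for p in params]))

    def squared_loss_grad(params):
        # analytic gradient of the loss above, same order as ``params``
        return znp.stack([2.0 * p for p in params])

    loss = zfit.loss.SimpleLoss(
        func=squared_loss,
        params=[param1, param2],
        errordef=1.0,
        gradient=squared_loss_grad,  # used by ``gradient`` and ``value_gradient``
        # no ``hessian`` given: the base loss falls back to automatic/numerical differentiation
    )

    value, grad = loss.value_gradient(params=[param1, param2])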

Thanks
------
- acampoverde for finding the multiprocessing bug

0.20.2 (16 Apr 2024)
========================

1 change: 0 additions & 1 deletion zfit/__init__.py
@@ -83,7 +83,6 @@ def _maybe_disable_warnings():
f"You are using TensorFlow version {_tf.__version__}. This zfit version ({__version__}) works"
f" only with TF >= 2"
)

from . import z # initialize first
from . import (
constraint,
2 changes: 2 additions & 0 deletions zfit/core/data.py
@@ -739,6 +739,8 @@ def from_numpy(
# warn_once("The order of the arguments `obs` and `array` has been swapped, array goes first (as any other `from_` constructor.", identifier="data_from_numpy")
# obs, array = array, obs
# # legacy end
if isinstance(array, (float, int)):
array = np.array([array])
if not isinstance(array, (np.ndarray)) and not (tf.is_tensor(array) and hasattr(array, "numpy")):
msg = f"`array` has to be a `np.ndarray`. Is currently {type(array)}"
raise TypeError(msg)
124 changes: 107 additions & 17 deletions zfit/core/loss.py
@@ -2,14 +2,15 @@

from __future__ import annotations

from contextlib import suppress
from functools import partial
from typing import TYPE_CHECKING, Literal, Optional, Union

import pydantic
from pydantic import Field
from tensorflow.python.util.deprecation import deprecated

from ..exception import OutsideLimitsError
from ..exception import OutsideLimitsError, SpecificFunctionNotImplementedError
from ..serialization.serializer import BaseRepr, Serializer
from .data import convert_to_data
from .serialmixin import SerializableMixin
@@ -40,9 +41,11 @@
from ..util.warnings import warn_advanced_feature
from ..z.math import (
autodiff_gradient,
autodiff_hessian,
autodiff_value_gradients,
automatic_value_gradients_hessian,
numerical_gradient,
numerical_hessian,
numerical_value_gradient,
numerical_value_gradients_hessian,
)
@@ -156,6 +159,22 @@ def _check_container(cls, v):
return v


class GradientNotImplementedError(SpecificFunctionNotImplementedError):
pass


class ValueGradientNotImplementedError(SpecificFunctionNotImplementedError):
pass


class ValueGradientHessianNotImplementedError(SpecificFunctionNotImplementedError):
pass


class HessianNotImplementedError(SpecificFunctionNotImplementedError):
pass


class BaseLoss(ZfitLoss, BaseNumeric):
def __init__(
self,
@@ -569,14 +588,25 @@ def gradient(
numgrad = self._options["numgrad"] if numgrad is None else numgrad
paramvals, checked = self.check_precompile(params=paramvals)
with self._check_set_input_params(paramvals, guarantee_checked=checked):
return self._call_gradient(params, numgrad)

@z.function(wraps="loss")
def _call_gradient(self, params, numgrad):
with suppress(GradientNotImplementedError):
return self._gradient(params=params, numgrad=numgrad)

with suppress(ValueGradientNotImplementedError):
return self._value_gradient(params=params, numgrad=numgrad, full=False)[1]
return self._fallback_gradient(params=params, numgrad=numgrad)

def gradients(self, *_, **__):
msg = "`gradients` is deprecated, use `gradient` instead."
raise BreakingAPIChangeError(msg)

@z.function(wraps="loss")
def _gradient(self, params, numgrad):
def _gradient(self, params, numgrad): # noqa: ARG002
raise GradientNotImplementedError

def _fallback_gradient(self, params, numgrad):
self_value = partial(self.value, full=False)
if numgrad:
gradient = numerical_gradient(self_value, params=params)
@@ -622,15 +652,26 @@ def value_gradient(
full = DEFAULT_FULL_ARG
paramvals, checked = self.check_precompile(params=paramvals)
with self._check_set_input_params(paramvals, guarantee_checked=checked):
value, gradient = self._value_gradient(params=params, numgrad=numgrad, full=full)
value, gradient = self._call_value_gradient(params, numgrad, full)
return value, gradient

@z.function(wraps="loss")
def _call_value_gradient(self, params, numgrad, full):
with suppress(ValueGradientNotImplementedError):
return self._value_gradient(params=params, numgrad=numgrad, full=full)
with suppress(GradientNotImplementedError):
gradient = self._gradient(params=params, numgrad=numgrad)
return self.value(full=full), gradient
return self._fallback_value_gradient(params=params, numgrad=numgrad, full=full)

def value_gradients(self, *_, **__):
msg = "`value_gradients` is deprecated, use `value_gradient` instead."
raise BreakingAPIChangeError(msg)

@z.function(wraps="loss")
def _value_gradient(self, params, numgrad=False, *, full: bool | None = None):
def _value_gradient(self, params, numgrad, full): # noqa: ARG002
raise ValueGradientNotImplementedError

def _fallback_value_gradient(self, params, numgrad=False, *, full: bool | None = None):
if full is None:
full = DEFAULT_FULL_ARG
self_value = partial(self.value, full=full)
@@ -665,13 +706,31 @@ def hessian(
numgrad = self._options["numgrad"] if numgrad is None else numgrad
paramvals, checked = self.check_precompile(params=paramvals)
with self._check_set_input_params(paramvals, guarantee_checked=checked):
return self.value_gradient_hessian(params=params, hessian=hessian, full=False, numgrad=numgrad)[2]
return self._call_hessian(params, numgrad, hessian)

def _call_hessian(self, params, numgrad, hessian):
with suppress(HessianNotImplementedError):
return self._hessian(params=params, hessian=hessian, numgrad=numgrad)
with suppress(ValueGradientHessianNotImplementedError):
return self._value_gradient_hessian(params=params, hessian=hessian, numerical=numgrad, full=False)[2]
return self._fallback_hessian(params=params, hessian=hessian, numgrad=numgrad)

def _hessian(self, params, hessian, numgrad): # noqa: ARG002
raise HessianNotImplementedError

def _fallback_hessian(self, params, hessian, numgrad):
self_value = partial(self.value, full=False)
if numgrad:
hessian = numerical_hessian(self_value, params=params, hessian=hessian)
else:
hessian = autodiff_hessian(self_value, params=params, hessian=hessian)
return hessian

def value_gradient_hessian(
self,
params: ztyping.ParamTypeInput = None,
hessian=None,
*,
hessian=None,
full: bool | None = None,
numgrad=None,
paramvals: ztyping.ParamTypeInput = None,
@@ -707,16 +766,27 @@ def value_gradient_hessian(
full = DEFAULT_FULL_ARG
paramvals, checked = self.check_precompile(params=paramvals)
with self._check_set_input_params(paramvals, guarantee_checked=checked):
return self._call_value_gradient_hessian(params, numgrad, full, hessian)

@z.function(wraps="loss")
def _call_value_gradient_hessian(self, params, numgrad, full, hessian):
with suppress(ValueGradientHessianNotImplementedError):
return self._value_gradient_hessian(params=params, hessian=hessian, numerical=numgrad, full=full)
with suppress(HessianNotImplementedError):
hessian = self._hessian(params=params, hessian=hessian, numgrad=numgrad)
return *self.value_gradient(params=params, numgrad=numgrad, full=full), hessian
return self._fallback_value_gradient_hessian(params=params, hessian=hessian, numgrad=numgrad, full=full)

def value_gradients_hessian(self, *_, **__):
msg = "`value_gradients_hessian` is deprecated, use `value_gradient_hessian` instead."
raise BreakingAPIChangeError(msg)

@z.function(wraps="loss")
def _value_gradient_hessian(self, params, hessian, numerical=False, *, full: bool | None = None):
def _value_gradient_hessian(self, params, hessian, numerical=False, full: bool | None = None): # noqa: ARG002
raise ValueGradientHessianNotImplementedError

def _fallback_value_gradient_hessian(self, params, hessian, numgrad=False, *, full: bool | None = None):
self_value = partial(self.value, full=full)
if numerical:
if numgrad:
return numerical_value_gradients_hessian(
func=self_value, gradient=self.gradient, params=params, hessian=hessian
)
@@ -1111,6 +1181,9 @@ def __init__(
func: Callable,
params: Iterable[zfit.Parameter] | None = None,
errordef: float | None = None,
*,
gradient: Callable | None = None,
hessian: Callable | None = None,
# legacy
deps: Iterable[zfit.Parameter] = NONE,
dependents: Iterable[zfit.Parameter] = NONE,
@@ -1126,10 +1199,14 @@ def __init__(
the ``func`` depends on.
errordef: Definition of which change in the loss corresponds to a change of 1 sigma.
For example, 1 for Chi squared, 0.5 for negative log-likelihood.
gradient: Function that calculates the gradient of the loss with respect to the parameters. If not given,
the gradient will be calculated automatically.
hessian: Function that calculates the hessian of the loss with respect to the parameters.
If not given, the hessian will be calculated automatically.
Usage:
.. code:: python
.. code-block:: python
import zfit
import zfit.z.numpy as znp
@@ -1188,6 +1265,8 @@ def squared_loss(params):

self._simple_func = func
self._errordef = errordef
self._grad_fn = gradient
self._hess_fn = hessian
params = convert_to_parameters(params, prefer_constant=False)
self._params = params
self._simple_func_params = _extract_dependencies(params)
@@ -1205,6 +1284,18 @@ def _get_params(
own_params = extract_filter_params(self._params, floating=floating, extract_independent=extract_independent)
return params.union(own_params)

def _gradient(self, params, numgrad):
del numgrad
if self._grad_fn is not None:
return self._grad_fn(params)
raise GradientNotImplementedError

def _hessian(self, params, hessian, numgrad):
del hessian, numgrad
if self._hess_fn is not None:
return self._hess_fn(params)
raise HessianNotImplementedError

@property
def errordef(self):
errordef = self._errordef
@@ -1213,11 +1304,10 @@ def errordef(self):
raise RuntimeError(msg)
return errordef

# @z.function(wraps='loss')
def _loss_func(self, model, data, fit_range, constraints=None, log_offset=None): # noqa: ARG002
if log_offset not in (None, False):
msg = "log_offset is not allowed for a SimpleLoss"
raise ValueError(msg)
if log_offset is not None and log_offset is not False:
pass
# raise ValueError(msg)
try:
params = self._simple_func_params
params = tuple(params)
@@ -1227,7 +1317,7 @@ def _loss_func(self, model, data, fit_range, constraints=None, log_offset=None):
value = self._simple_func()
else:
raise error
return z.convert_to_tensor(value)
return znp.asarray(value)

def __add__(self, other):
msg = "Cannot add a SimpleLoss, 'addition' of losses can mean anything." "Add them manually"
4 changes: 3 additions & 1 deletion zfit/minimizers/ipopt.py
@@ -379,7 +379,9 @@ def hessian_inplace(x, out):
minimizer.set(**{option: "yes" for option in warm_start_options})

# update the tolerances
self._update_tol_inplace(criterion_value=criterion_value, internal_tol=internal_tol)
self._update_tol_inplace(
criterion_value=criterion_value, internal_tol=internal_tol
) # hand-tuned 0.1 factor

else:
valid = False
3 changes: 2 additions & 1 deletion zfit/models/basic.py
@@ -337,7 +337,8 @@ def _voigt_integral_from_inf_to_inf(limits, params, model):
return sigma * np.sqrt(2 * np.pi)


limits = Space(axes=0, limits=(-znp.inf, znp.inf))
# do NOT uncomment, this can lead to deadlocks. No joke: https://github.com/tensorflow/tensorflow/issues/66115
# limits = Space(axes=0, limits=(-znp.inf, znp.inf))


# todo: this only works if executing eagerly, which fails for at least the binned PDFs
2 changes: 0 additions & 2 deletions zfit/util/ztyping.py
@@ -133,7 +133,6 @@
Iterable["zfit.core.interfaces.ZfitGraphCachable"],
] #:


LimitsDictAxes = Dict[Tuple[int], "zfit.core.interfaces.ZfitLimit"] #:
LimitsDictObs = Dict[Tuple[str], "zfit.core.interfaces.ZfitLimit"] #:
LimitsDictNoCoords = Union[LimitsDictAxes, LimitsDictObs] #:
@@ -149,5 +148,4 @@
] #:
ArrayLike = tf.types.experimental.TensorLike #:


ParamValuesMap = Optional[Mapping[Union[str, "zfit.core.interfaces.ZfitParameter"], NumericalScalarType]]
