From acbf63313ac4943cb82ad272712f8fec997a85a7 Mon Sep 17 00:00:00 2001 From: Jonas Rembser Date: Sat, 4 Oct 2025 11:34:49 +0200 Subject: [PATCH 1/7] [Minuit2] Use native FCNBase type in NumericalDerivator class We should avoid leaking the ROOT Math classes into Minuit 2. --- math/minuit2/inc/Minuit2/Minuit2Minimizer.h | 5 +- math/minuit2/inc/Minuit2/NumericalDerivator.h | 17 +++---- math/minuit2/src/NumericalDerivator.cxx | 49 +++++++++---------- .../TestStatistics/LikelihoodGradientJob.cxx | 18 +++++-- 4 files changed, 47 insertions(+), 42 deletions(-) diff --git a/math/minuit2/inc/Minuit2/Minuit2Minimizer.h b/math/minuit2/inc/Minuit2/Minuit2Minimizer.h index 4d5d1e8144ca6..2972a8823c1e6 100644 --- a/math/minuit2/inc/Minuit2/Minuit2Minimizer.h +++ b/math/minuit2/inc/Minuit2/Minuit2Minimizer.h @@ -279,6 +279,9 @@ class Minuit2Minimizer : public ROOT::Math::Minimizer { /// return the minimizer state (containing values, step size , etc..) const ROOT::Minuit2::MnUserParameterState &State() { return fState; } + const ROOT::Minuit2::FCNBase *GetFCN() const { return fMinuitFCN; } + ROOT::Minuit2::FCNBase *GetFCN() { return fMinuitFCN; } + protected: // protected function for accessing the internal Minuit2 object. 
Needed for derived classes @@ -288,8 +291,6 @@ class Minuit2Minimizer : public ROOT::Math::Minimizer { void SetMinimizerType(ROOT::Minuit2::EMinimizerType type); - virtual const ROOT::Minuit2::FCNBase *GetFCN() const { return fMinuitFCN; } - /// examine the minimum result bool ExamineMinimum(const ROOT::Minuit2::FunctionMinimum &min); diff --git a/math/minuit2/inc/Minuit2/NumericalDerivator.h b/math/minuit2/inc/Minuit2/NumericalDerivator.h index 42f52044c4946..c4ad937fe594f 100644 --- a/math/minuit2/inc/Minuit2/NumericalDerivator.h +++ b/math/minuit2/inc/Minuit2/NumericalDerivator.h @@ -18,11 +18,10 @@ #ifndef ROOT_Minuit2_NumericalDerivator #define ROOT_Minuit2_NumericalDerivator -#include - #include "Fit/ParameterSettings.h" #include "Minuit2/MnParameterTransformation.h" #include "Minuit2/MnMachinePrecision.h" +#include "Minuit2/FCNBase.h" #include @@ -45,19 +44,19 @@ class NumericalDerivator { NumericalDerivator(double step_tolerance, double grad_tolerance, unsigned int ncycles, double error_level, bool always_exactly_mimic_minuit2 = true); - void SetupDifferentiate(const ROOT::Math::IBaseFunctionMultiDim *function, const double *cx, + void SetupDifferentiate(unsigned int nDim, const FCNBase *function, const double *cx, std::span parameters); - std::vector Differentiate(const ROOT::Math::IBaseFunctionMultiDim *function, const double *x, + std::vector Differentiate(unsigned int nDim, const FCNBase *function, const double *x, std::span parameters, std::span previous_gradient); - DerivatorElement PartialDerivative(const ROOT::Math::IBaseFunctionMultiDim *function, const double *x, + DerivatorElement PartialDerivative(unsigned int nDim, const FCNBase *function, const double *x, std::span parameters, unsigned int i_component, DerivatorElement previous); - DerivatorElement FastPartialDerivative(const ROOT::Math::IBaseFunctionMultiDim *function, + DerivatorElement FastPartialDerivative(const FCNBase *function, std::span parameters, unsigned int i_component, const 
DerivatorElement &previous); - DerivatorElement operator()(const ROOT::Math::IBaseFunctionMultiDim *function, const double *x, + DerivatorElement operator()(unsigned int nDim, const FCNBase *function, const double *x, std::span parameters, unsigned int i_component, const DerivatorElement &previous); @@ -71,8 +70,7 @@ class NumericalDerivator { double Ext2int(const ROOT::Fit::ParameterSettings ¶meter, double val) const; double DInt2Ext(const ROOT::Fit::ParameterSettings ¶meter, double val) const; - void SetInitialGradient(const ROOT::Math::IBaseFunctionMultiDim *function, - std::span parameters, + void SetInitialGradient(std::span parameters, std::vector &gradient); inline bool AlwaysExactlyMimicMinuit2() const { return fAlwaysExactlyMimicMinuit2; } @@ -100,7 +98,6 @@ class NumericalDerivator { unsigned int fNCycles = 2; bool fAlwaysExactlyMimicMinuit2; - }; std::ostream &operator<<(std::ostream &out, const DerivatorElement &value); diff --git a/math/minuit2/src/NumericalDerivator.cxx b/math/minuit2/src/NumericalDerivator.cxx index ebd62aaa83a29..d028ed95d7c84 100644 --- a/math/minuit2/src/NumericalDerivator.cxx +++ b/math/minuit2/src/NumericalDerivator.cxx @@ -26,7 +26,6 @@ #include "Minuit2/NumericalDerivator.h" #include #include -#include #include #include #include @@ -44,51 +43,52 @@ NumericalDerivator::NumericalDerivator(bool always_exactly_mimic_minuit2) NumericalDerivator::NumericalDerivator(double step_tolerance, double grad_tolerance, unsigned int ncycles, double error_level, bool always_exactly_mimic_minuit2) - : fStepTolerance(step_tolerance), fGradTolerance(grad_tolerance), fUp(error_level), fNCycles(ncycles), + : fStepTolerance(step_tolerance), + fGradTolerance(grad_tolerance), + fUp(error_level), + fNCycles(ncycles), fAlwaysExactlyMimicMinuit2(always_exactly_mimic_minuit2) { } -NumericalDerivator::NumericalDerivator(const NumericalDerivator &/*other*/) = default; - +NumericalDerivator::NumericalDerivator(const NumericalDerivator & /*other*/) = default; 
/// This function sets internal state based on input parameters. This state /// setup is used in the actual (partial) derivative calculations. -void NumericalDerivator::SetupDifferentiate(const ROOT::Math::IBaseFunctionMultiDim *function, const double *cx, +void NumericalDerivator::SetupDifferentiate(unsigned int nDim, const FCNBase *function, const double *cx, std::span parameters) { assert(function != nullptr && "function is a nullptr"); - fVx.resize(function->NDim()); - fVxExternal.resize(function->NDim()); - fVxFValCache.resize(function->NDim()); - std::copy(cx, cx + function->NDim(), fVx.data()); + fVx.resize(nDim); + fVxExternal.resize(nDim); + fVxFValCache.resize(nDim); + std::copy(cx, cx + nDim, fVx.data()); // convert to Minuit external parameters - for (unsigned i = 0; i < function->NDim(); i++) { + for (unsigned i = 0; i < nDim; i++) { fVxExternal[i] = Int2ext(parameters[i], fVx[i]); } if (fVx != fVxFValCache) { fVxFValCache = fVx; - fVal = (*function)(fVxExternal.data()); // value of function at given points + fVal = (*function)(fVxExternal); // value of function at given points } fDfmin = 8. * fPrecision.Eps2() * (std::abs(fVal) + fUp); fVrysml = 8. 
* fPrecision.Eps() * fPrecision.Eps(); } -DerivatorElement NumericalDerivator::PartialDerivative(const ROOT::Math::IBaseFunctionMultiDim *function, - const double *x, +DerivatorElement NumericalDerivator::PartialDerivative(unsigned int nDim, const FCNBase *function, const double *x, std::span parameters, unsigned int i_component, DerivatorElement previous) { - SetupDifferentiate(function, x, parameters); + SetupDifferentiate(nDim, function, x, parameters); return FastPartialDerivative(function, parameters, i_component, previous); } // leaves the parameter setup to the caller -DerivatorElement NumericalDerivator::FastPartialDerivative(const ROOT::Math::IBaseFunctionMultiDim *function, +DerivatorElement NumericalDerivator::FastPartialDerivative(const FCNBase *function, std::span parameters, unsigned int i_component, const DerivatorElement &previous) { @@ -121,11 +121,11 @@ DerivatorElement NumericalDerivator::FastPartialDerivative(const ROOT::Math::IBa step_old = step; fVx[i_component] = xtf + step; fVxExternal[i_component] = Int2ext(parameters[i_component], fVx[i_component]); - double fs1 = (*function)(fVxExternal.data()); + double fs1 = (*function)(fVxExternal); fVx[i_component] = xtf - step; fVxExternal[i_component] = Int2ext(parameters[i_component], fVx[i_component]); - double fs2 = (*function)(fVxExternal.data()); + double fs2 = (*function)(fVxExternal); fVx[i_component] = xtf; fVxExternal[i_component] = Int2ext(parameters[i_component], fVx[i_component]); @@ -142,24 +142,24 @@ DerivatorElement NumericalDerivator::FastPartialDerivative(const ROOT::Math::IBa return deriv; } -DerivatorElement NumericalDerivator::operator()(const ROOT::Math::IBaseFunctionMultiDim *function, const double *x, +DerivatorElement NumericalDerivator::operator()(unsigned int nDim, const FCNBase *function, const double *x, std::span parameters, unsigned int i_component, const DerivatorElement &previous) { - return PartialDerivative(function, x, parameters, i_component, previous); + return 
PartialDerivative(nDim, function, x, parameters, i_component, previous); } std::vector -NumericalDerivator::Differentiate(const ROOT::Math::IBaseFunctionMultiDim *function, const double *cx, +NumericalDerivator::Differentiate(unsigned int nDim, const FCNBase *function, const double *cx, std::span parameters, std::span previous_gradient) { - SetupDifferentiate(function, cx, parameters); + SetupDifferentiate(nDim, function, cx, parameters); std::vector gradient; - gradient.reserve(function->NDim()); + gradient.reserve(nDim); - for (unsigned int ix = 0; ix < function->NDim(); ++ix) { + for (unsigned int ix = 0; ix < nDim; ++ix) { gradient.emplace_back(FastPartialDerivative(function, parameters, ix, previous_gradient[ix])); } @@ -221,8 +221,7 @@ double NumericalDerivator::DInt2Ext(const ROOT::Fit::ParameterSettings ¶mete // MODIFIED: /// This function was not implemented as in Minuit2. Now it copies the behavior /// of InitialGradientCalculator. See https://github.com/roofit-dev/root/issues/10 -void NumericalDerivator::SetInitialGradient(const ROOT::Math::IBaseFunctionMultiDim *, - std::span parameters, +void NumericalDerivator::SetInitialGradient(std::span parameters, std::vector &gradient) { // set an initial gradient using some given steps diff --git a/roofit/roofitcore/src/TestStatistics/LikelihoodGradientJob.cxx b/roofit/roofitcore/src/TestStatistics/LikelihoodGradientJob.cxx index 4a1537313f9ae..95e0cd68011ee 100644 --- a/roofit/roofitcore/src/TestStatistics/LikelihoodGradientJob.cxx +++ b/roofit/roofitcore/src/TestStatistics/LikelihoodGradientJob.cxx @@ -20,6 +20,8 @@ #include "RooMsgService.h" #include "RooMinimizer.h" +#include "Minuit2/Minuit2Minimizer.h" + #include "Minuit2/MnStrategy.h" namespace RooFit { @@ -44,9 +46,9 @@ void LikelihoodGradientJob::synchronizeParameterSettings( } void LikelihoodGradientJob::synchronizeParameterSettings( - ROOT::Math::IMultiGenFunction *function, const std::vector ¶meter_settings) + ROOT::Math::IMultiGenFunction * 
/*function*/, const std::vector ¶meter_settings) { - gradf_.SetInitialGradient(function, parameter_settings, grad_); + gradf_.SetInitialGradient(parameter_settings, grad_); } void LikelihoodGradientJob::synchronizeWithMinimizer(const ROOT::Math::MinimizerOptions &options) @@ -185,9 +187,12 @@ void LikelihoodGradientJob::update_state() std::copy(offsets_message_begin, offsets_message_end, shared_offset_.offsets().begin()); } + // Since the gradient parallelization only support Minuit 2, we can do this cast + auto &minim = static_cast(*minimizer_->_minimizer); + // note: the next call must stay after the (possible) update of the offset, because it // calls the likelihood function, so the offset must be correct at this point - gradf_.SetupDifferentiate(minimizer_->getMultiGenFcn(), minuit_internal_x_.data(), + gradf_.SetupDifferentiate(minimizer_->getNPar(), minim.GetFCN(), minuit_internal_x_.data(), minimizer_->fitter()->Config().ParamsSettings()); } } @@ -199,9 +204,12 @@ void LikelihoodGradientJob::update_state() void LikelihoodGradientJob::run_derivator(unsigned int i_component) const { + // Since the gradient parallelization only support Minuit 2, we can do this cast + auto &minim = static_cast(*minimizer_->_minimizer); + // Calculate the derivative etc for these parameters - grad_[i_component] = gradf_.FastPartialDerivative( - minimizer_->getMultiGenFcn(), minimizer_->fitter()->Config().ParamsSettings(), i_component, grad_[i_component]); + grad_[i_component] = gradf_.FastPartialDerivative(minim.GetFCN(), minimizer_->fitter()->Config().ParamsSettings(), + i_component, grad_[i_component]); } void LikelihoodGradientJob::calculate_all() From 1f8ce3e5086c096c302a50fd3294c06facf419d3 Mon Sep 17 00:00:00 2001 From: Jonas Rembser Date: Fri, 3 Oct 2025 17:50:30 +0200 Subject: [PATCH 2/7] [Minuit2] Use smart pointers in Minuit2Minimizer --- math/minuit2/inc/Minuit2/Minuit2Minimizer.h | 15 ++-- math/minuit2/src/Minuit2Minimizer.cxx | 99 +++++++++------------ 2 files 
changed, 47 insertions(+), 67 deletions(-) diff --git a/math/minuit2/inc/Minuit2/Minuit2Minimizer.h b/math/minuit2/inc/Minuit2/Minuit2Minimizer.h index 2972a8823c1e6..acbe533047ef6 100644 --- a/math/minuit2/inc/Minuit2/Minuit2Minimizer.h +++ b/math/minuit2/inc/Minuit2/Minuit2Minimizer.h @@ -279,15 +279,13 @@ class Minuit2Minimizer : public ROOT::Math::Minimizer { /// return the minimizer state (containing values, step size , etc..) const ROOT::Minuit2::MnUserParameterState &State() { return fState; } - const ROOT::Minuit2::FCNBase *GetFCN() const { return fMinuitFCN; } - ROOT::Minuit2::FCNBase *GetFCN() { return fMinuitFCN; } + const ROOT::Minuit2::FCNBase *GetFCN() const { return fMinuitFCN.get(); } + ROOT::Minuit2::FCNBase *GetFCN() { return fMinuitFCN.get(); } protected: // protected function for accessing the internal Minuit2 object. Needed for derived classes - virtual const ROOT::Minuit2::ModularFunctionMinimizer *GetMinimizer() const { return fMinimizer; } - - virtual void SetMinimizer(ROOT::Minuit2::ModularFunctionMinimizer *m) { fMinimizer = m; } + virtual const ROOT::Minuit2::ModularFunctionMinimizer *GetMinimizer() const { return fMinimizer.get(); } void SetMinimizerType(ROOT::Minuit2::EMinimizerType type); @@ -309,10 +307,9 @@ class Minuit2Minimizer : public ROOT::Math::Minimizer { int fMinosStatus = -1; // Minos status code ROOT::Minuit2::MnUserParameterState fState; - // std::vector fMinosErrors; - ROOT::Minuit2::ModularFunctionMinimizer *fMinimizer; - ROOT::Minuit2::FCNBase *fMinuitFCN; - ROOT::Minuit2::FunctionMinimum *fMinimum; + std::unique_ptr fMinimizer; + std::unique_ptr fMinuitFCN; + std::unique_ptr fMinimum; mutable std::vector fValues; mutable std::vector fErrors; }; diff --git a/math/minuit2/src/Minuit2Minimizer.cxx b/math/minuit2/src/Minuit2Minimizer.cxx index efc95295a6b91..e8a134512fb9d 100644 --- a/math/minuit2/src/Minuit2Minimizer.cxx +++ b/math/minuit2/src/Minuit2Minimizer.cxx @@ -82,14 +82,13 @@ int ControlPrintLevel() void 
RestoreGlobalPrintLevel(int) {} #endif -Minuit2Minimizer::Minuit2Minimizer(ROOT::Minuit2::EMinimizerType type) - : fDim(0), fMinimizer(nullptr), fMinuitFCN(nullptr), fMinimum(nullptr) +Minuit2Minimizer::Minuit2Minimizer(ROOT::Minuit2::EMinimizerType type) : fDim(0) { // Default constructor implementation depending on minimizer type SetMinimizerType(type); } -Minuit2Minimizer::Minuit2Minimizer(const char *type) : fDim(0), fMinimizer(nullptr), fMinuitFCN(nullptr), fMinimum(nullptr) +Minuit2Minimizer::Minuit2Minimizer(const char *type) : fDim(0) { // constructor from a string @@ -117,49 +116,31 @@ void Minuit2Minimizer::SetMinimizerType(ROOT::Minuit2::EMinimizerType type) // Set minimizer algorithm type fUseFumili = false; switch (type) { - case ROOT::Minuit2::kMigrad: - // std::cout << "Minuit2Minimizer: minimize using MIGRAD " << std::endl; - SetMinimizer(new ROOT::Minuit2::VariableMetricMinimizer()); - return; + case ROOT::Minuit2::kMigrad: fMinimizer = std::make_unique(); return; case ROOT::Minuit2::kMigradBFGS: - // std::cout << "Minuit2Minimizer: minimize using MIGRAD " << std::endl; - SetMinimizer(new ROOT::Minuit2::VariableMetricMinimizer(VariableMetricMinimizer::BFGSType())); - return; - case ROOT::Minuit2::kSimplex: - // std::cout << "Minuit2Minimizer: minimize using SIMPLEX " << std::endl; - SetMinimizer(new ROOT::Minuit2::SimplexMinimizer()); + fMinimizer = std::make_unique(VariableMetricMinimizer::BFGSType()); return; - case ROOT::Minuit2::kCombined: SetMinimizer(new ROOT::Minuit2::CombinedMinimizer()); return; - case ROOT::Minuit2::kScan: SetMinimizer(new ROOT::Minuit2::ScanMinimizer()); return; + case ROOT::Minuit2::kSimplex: fMinimizer = std::make_unique(); return; + case ROOT::Minuit2::kCombined: fMinimizer = std::make_unique(); return; + case ROOT::Minuit2::kScan: fMinimizer = std::make_unique(); return; case ROOT::Minuit2::kFumili: - SetMinimizer(new ROOT::Minuit2::FumiliMinimizer()); + fMinimizer = std::make_unique(); fUseFumili = true; return; 
default: // migrad minimizer - SetMinimizer(new ROOT::Minuit2::VariableMetricMinimizer()); + fMinimizer = std::make_unique(); } } -Minuit2Minimizer::~Minuit2Minimizer() -{ - // Destructor implementation. - if (fMinimizer) - delete fMinimizer; - if (fMinuitFCN) - delete fMinuitFCN; - if (fMinimum) - delete fMinimum; -} +Minuit2Minimizer::~Minuit2Minimizer() = default; void Minuit2Minimizer::Clear() { // delete the state in case of consecutive minimizations fState = MnUserParameterState(); // clear also the function minimum - if (fMinimum) - delete fMinimum; - fMinimum = nullptr; + fMinimum.reset(); } // set variables @@ -390,33 +371,37 @@ bool Minuit2Minimizer::GetVariableSettings(unsigned int ivar, ROOT::Fit::Paramet void Minuit2Minimizer::SetFunction(const ROOT::Math::IMultiGenFunction &func) { // set function to be minimized - if (fMinuitFCN) - delete fMinuitFCN; + fMinuitFCN.reset(); fDim = func.NDim(); const bool hasGrad = func.HasGradient(); if (!fUseFumili) { - fMinuitFCN = hasGrad ? 
static_cast(new ROOT::Minuit2::FCNGradAdapter(dynamic_cast(func), ErrorDef())) - : static_cast(new ROOT::Minuit2::FCNAdapter(func, ErrorDef())); + if (hasGrad) + fMinuitFCN = std::make_unique>( + dynamic_cast(func), ErrorDef()); + else + fMinuitFCN = std::make_unique>(func, ErrorDef()); + return; + } + if (hasGrad) { + // for Fumili the fit method function interface is required + auto fcnfunc = dynamic_cast(&func); + if (!fcnfunc) { + MnPrint print("Minuit2Minimizer", PrintLevel()); + print.Error("Wrong Fit method function for Fumili"); + return; + } + fMinuitFCN = std::make_unique>(*fcnfunc, fDim, + ErrorDef()); } else { - if(hasGrad) { - // for Fumili the fit method function interface is required - auto fcnfunc = dynamic_cast(&func); - if (!fcnfunc) { - MnPrint print("Minuit2Minimizer", PrintLevel()); - print.Error("Wrong Fit method function for Fumili"); - return; - } - fMinuitFCN = new ROOT::Minuit2::FumiliFCNAdapter(*fcnfunc, fDim, ErrorDef()); - } else { - // for Fumili the fit method function interface is required - auto fcnfunc = dynamic_cast(&func); - if (!fcnfunc) { - MnPrint print("Minuit2Minimizer", PrintLevel()); - print.Error("Wrong Fit method function for Fumili"); - return; - } - fMinuitFCN = new ROOT::Minuit2::FumiliFCNAdapter(*fcnfunc, fDim, ErrorDef()); + // for Fumili the fit method function interface is required + auto fcnfunc = dynamic_cast(&func); + if (!fcnfunc) { + MnPrint print("Minuit2Minimizer", PrintLevel()); + print.Error("Wrong Fit method function for Fumili"); + return; } + fMinuitFCN = + std::make_unique>(*fcnfunc, fDim, ErrorDef()); } } @@ -424,7 +409,7 @@ void Minuit2Minimizer::SetHessianFunction(std::function *>(fMinuitFCN); + auto fcn = static_cast *>(fMinuitFCN.get()); if (!fcn) return; fcn->SetHessianFunction(hfunc); } @@ -482,9 +467,7 @@ bool Minuit2Minimizer::Minimize() assert(GetMinimizer() != nullptr); // delete result of previous minimization - if (fMinimum) - delete fMinimum; - fMinimum = nullptr; + fMinimum.reset(); 
const int maxfcn = MaxFunctionCalls(); const double tol = Tolerance(); @@ -532,7 +515,7 @@ bool Minuit2Minimizer::Minimize() std::string fumiliMethod; ret = minuit2Opt->GetValue("FumiliMethod", fumiliMethod); if (ret) { - auto fumiliMinimizer = dynamic_cast(fMinimizer); + auto fumiliMinimizer = dynamic_cast(fMinimizer.get()); if (fumiliMinimizer) fumiliMinimizer->SetMethod(fumiliMethod); } @@ -579,7 +562,7 @@ bool Minuit2Minimizer::Minimize() const ROOT::Minuit2::MnStrategy strategy = customizedStrategy(strategyLevel, fOptions); ROOT::Minuit2::FunctionMinimum min = GetMinimizer()->Minimize(*fMinuitFCN, fState, strategy, maxfcn, tol); - fMinimum = new ROOT::Minuit2::FunctionMinimum(min); + fMinimum = std::make_unique(min); // check if Hesse needs to be run. We do it when is requested (IsValidError() == true , set by SetParabError(true) in fitConfig) // (IsValidError() means the flag to get correct error from the Minimizer is set (Minimizer::SetValidError()) From 88931d769e195ff448267c211efb1cd892f832e7 Mon Sep 17 00:00:00 2001 From: Jonas Rembser Date: Sat, 4 Oct 2025 11:47:06 +0200 Subject: [PATCH 3/7] [RF] Refactor multiprocess minimizer code to use Minuit 2 directly This commit proposes to use Minuit 2 directly via the `Minuit2Minimizer` instead of the `ROOT::Math::Minimizer` abstraction. The benefit is that we will be able to remove some interfaces in the `ROOT::Math` classes that were only motivated by the control that RooFit multiprocessing needs over Minuit 2, leaking implementation details. 
--- math/minuit2/inc/Minuit2/Minuit2Minimizer.h | 3 + math/minuit2/src/Minuit2Minimizer.cxx | 6 ++ roofit/roofitcore/CMakeLists.txt | 7 ++- .../LikelihoodGradientWrapper.h | 4 +- roofit/roofitcore/inc/RooMinimizer.h | 10 +--- roofit/roofitcore/src/RooAbsMinimizerFcn.h | 5 +- roofit/roofitcore/src/RooMinimizer.cxx | 18 ++---- roofit/roofitcore/src/RooMinimizerFcn.cxx | 5 ++ roofit/roofitcore/src/RooMinimizerFcn.h | 3 +- .../TestStatistics/LikelihoodGradientJob.cxx | 10 +--- .../TestStatistics/LikelihoodGradientJob.h | 5 +- .../LikelihoodGradientWrapper.cxx | 2 +- .../src/TestStatistics/MinuitFcnGrad.cxx | 58 +++++++++++-------- .../src/TestStatistics/MinuitFcnGrad.h | 9 +-- 14 files changed, 76 insertions(+), 69 deletions(-) diff --git a/math/minuit2/inc/Minuit2/Minuit2Minimizer.h b/math/minuit2/inc/Minuit2/Minuit2Minimizer.h index acbe533047ef6..dd1669a4f1fd4 100644 --- a/math/minuit2/inc/Minuit2/Minuit2Minimizer.h +++ b/math/minuit2/inc/Minuit2/Minuit2Minimizer.h @@ -279,6 +279,9 @@ class Minuit2Minimizer : public ROOT::Math::Minimizer { /// return the minimizer state (containing values, step size , etc..) const ROOT::Minuit2::MnUserParameterState &State() { return fState; } + /// To set the function directly to a Minuit 2 function. 
+ void SetFCN(unsigned int nDim, std::unique_ptr fcn); + const ROOT::Minuit2::FCNBase *GetFCN() const { return fMinuitFCN.get(); } ROOT::Minuit2::FCNBase *GetFCN() { return fMinuitFCN.get(); } diff --git a/math/minuit2/src/Minuit2Minimizer.cxx b/math/minuit2/src/Minuit2Minimizer.cxx index e8a134512fb9d..975f1bab412fc 100644 --- a/math/minuit2/src/Minuit2Minimizer.cxx +++ b/math/minuit2/src/Minuit2Minimizer.cxx @@ -1350,6 +1350,12 @@ void Minuit2Minimizer::SetStorageLevel(int level) fMinimizer->Builder().SetStorageLevel(level); } +void Minuit2Minimizer::SetFCN(unsigned int nDim, std::unique_ptr fcn) +{ + fDim = nDim; + fMinuitFCN = std::move(fcn); +} + } // end namespace Minuit2 } // end namespace ROOT diff --git a/roofit/roofitcore/CMakeLists.txt b/roofit/roofitcore/CMakeLists.txt index 0f397c2b1d449..d6c956ab1e1a9 100644 --- a/roofit/roofitcore/CMakeLists.txt +++ b/roofit/roofitcore/CMakeLists.txt @@ -10,7 +10,11 @@ ############################################################################ if(roofit_multiprocess) - set(RooFitMPTestStatisticsSources src/TestStatistics/LikelihoodJob.cxx src/TestStatistics/LikelihoodGradientJob.cxx) + set(RooFitMPTestStatisticsSources + src/TestStatistics/LikelihoodGradientJob.cxx + src/TestStatistics/LikelihoodJob.cxx + src/TestStatistics/MinuitFcnGrad.cxx + ) list(APPEND EXTRA_LIBRARIES RooFitMultiProcess) #FIXME: The ProcessTimer.h exposes json in its interface: list(APPEND EXTRA_LIBRARIES nlohmann_json::nlohmann_json) @@ -436,7 +440,6 @@ ROOT_STANDARD_LIBRARY_PACKAGE(RooFitCore src/TestStatistics/LikelihoodGradientWrapper.cxx src/TestStatistics/LikelihoodSerial.cxx src/TestStatistics/LikelihoodWrapper.cxx - src/TestStatistics/MinuitFcnGrad.cxx src/TestStatistics/RooAbsL.cxx src/TestStatistics/RooBinnedL.cxx src/TestStatistics/RooRealL.cxx diff --git a/roofit/roofitcore/inc/RooFit/TestStatistics/LikelihoodGradientWrapper.h b/roofit/roofitcore/inc/RooFit/TestStatistics/LikelihoodGradientWrapper.h index 
d5fc83efd7cd2..825b01576c10d 100644 --- a/roofit/roofitcore/inc/RooFit/TestStatistics/LikelihoodGradientWrapper.h +++ b/roofit/roofitcore/inc/RooFit/TestStatistics/LikelihoodGradientWrapper.h @@ -58,8 +58,8 @@ class LikelihoodGradientWrapper { /// Synchronize minimizer settings with calculators in child classes. virtual void synchronizeWithMinimizer(const ROOT::Math::MinimizerOptions &options); virtual void synchronizeParameterSettings(const std::vector ¶meter_settings); - virtual void synchronizeParameterSettings(ROOT::Math::IMultiGenFunction *function, - const std::vector ¶meter_settings) = 0; + virtual void + synchronizeParameterSettingsImpl(const std::vector ¶meter_settings) = 0; /// Minuit passes in parameter values that may not conform to RooFit internal standards (like applying range /// clipping), but that the specific calculator does need. This function can be implemented to receive these /// Minuit-internal values. diff --git a/roofit/roofitcore/inc/RooMinimizer.h b/roofit/roofitcore/inc/RooMinimizer.h index d8e89fb3be232..54a673b9993e5 100644 --- a/roofit/roofitcore/inc/RooMinimizer.h +++ b/roofit/roofitcore/inc/RooMinimizer.h @@ -17,9 +17,7 @@ #ifndef ROO_MINIMIZER #define ROO_MINIMIZER -#include -#include -#include +#include #include #include @@ -27,13 +25,13 @@ #include #include +#include #include #include #include #include class RooAbsMinimizerFcn; -class RooAbsReal; class RooFitResult; class RooArgList; class RooRealVar; @@ -188,8 +186,6 @@ class RooMinimizer : public TObject { /// Return underlying ROOT fitter object inline auto fitter() { return std::make_unique(&_config, _minimizer.get(), _result.get()); } - ROOT::Math::IMultiGenFunction *getMultiGenFcn() const; - int getNPar() const; void applyCovarianceMatrix(TMatrixDSym const &V); @@ -218,7 +214,7 @@ class RooMinimizer : public TObject { int exec(std::string const &algoName, std::string const &statusName); - bool fitFCN(const ROOT::Math::IMultiGenFunction &fcn); + bool fitFCN(); bool 
calculateHessErrors(); bool calculateMinosErrors(); diff --git a/roofit/roofitcore/src/RooAbsMinimizerFcn.h b/roofit/roofitcore/src/RooAbsMinimizerFcn.h index 3d3b041bd8964..3b4ebb66d50eb 100644 --- a/roofit/roofitcore/src/RooAbsMinimizerFcn.h +++ b/roofit/roofitcore/src/RooAbsMinimizerFcn.h @@ -26,6 +26,8 @@ #include "RooMinimizer.h" #include "RooRealVar.h" +#include + #include #include #include @@ -37,6 +39,8 @@ class RooAbsMinimizerFcn { RooAbsMinimizerFcn(RooArgList paramList, RooMinimizer *context); virtual ~RooAbsMinimizerFcn() = default; + virtual void initMinimizer(ROOT::Math::Minimizer &) = 0; + /// Informs Minuit through its parameter_settings vector of RooFit parameter properties. bool synchronizeParameterSettings(std::vector ¶meters, bool optConst); /// Like synchronizeParameterSettings, Synchronize informs Minuit through @@ -79,7 +83,6 @@ class RooAbsMinimizerFcn { /// Enable or disable offsetting on the function to be minimized, which enhances numerical precision. virtual void setOffsetting(bool flag) = 0; - virtual ROOT::Math::IMultiGenFunction *getMultiGenFcn() = 0; RooMinimizer::Config const &cfg() const { return _context->_cfg; } diff --git a/roofit/roofitcore/src/RooMinimizer.cxx b/roofit/roofitcore/src/RooMinimizer.cxx index 0d33e1b92ab54..b040f253b0d82 100644 --- a/roofit/roofitcore/src/RooMinimizer.cxx +++ b/roofit/roofitcore/src/RooMinimizer.cxx @@ -53,10 +53,10 @@ automatic PDF optimization. 
#include "RooHelpers.h" #include "RooMinimizerFcn.h" #include "RooFitResult.h" -#include "TestStatistics/MinuitFcnGrad.h" #include "RooFit/TestStatistics/RooAbsL.h" #include "RooFit/TestStatistics/RooRealL.h" #ifdef ROOFIT_MULTIPROCESS +#include "TestStatistics/MinuitFcnGrad.h" #include "RooFit/MultiProcess/Config.h" #include "RooFit/MultiProcess/ProcessTimer.h" #endif @@ -351,7 +351,7 @@ int RooMinimizer::minimize(const char *type, const char *alg) { auto ctx = makeEvalErrorContext(); - bool ret = fitFCN(*_fcn->getMultiGenFcn()); + bool ret = fitFCN(); determineStatus(ret); } profileStop(); @@ -393,7 +393,7 @@ int RooMinimizer::exec(std::string const &algoName, std::string const &statusNam ret = calculateMinosErrors(); } else { _config.SetMinimizer(_cfg.minimizerType.c_str(), algoName.c_str()); - ret = fitFCN(*_fcn->getMultiGenFcn()); + ret = fitFCN(); } determineStatus(ret); } @@ -775,12 +775,6 @@ void RooMinimizer::profileStop() } } -ROOT::Math::IMultiGenFunction *RooMinimizer::getMultiGenFcn() const -{ - - return _fcn->getMultiGenFcn(); -} - //////////////////////////////////////////////////////////////////////////////// /// Apply results of given external covariance matrix. i.e. 
propagate its errors /// to all RRV parameter representations and give this matrix instead of the @@ -904,13 +898,13 @@ std::unique_ptr RooMinimizer::makeEvalErrorContext return std::make_unique(m); } -bool RooMinimizer::fitFCN(const ROOT::Math::IMultiGenFunction &fcn) +bool RooMinimizer::fitFCN() { // fit a user provided FCN function // create fit parameter settings // Check number of parameters - unsigned int npar = fcn.NDim(); + unsigned int npar = getNPar(); if (npar == 0) { coutE(Minimization) << "RooMinimizer::fitFCN(): FCN function has zero parameters" << std::endl; return false; @@ -1134,7 +1128,7 @@ bool RooMinimizer::calculateMinosErrors() void RooMinimizer::initMinimizer() { _minimizer = std::unique_ptr(_config.CreateMinimizer()); - _minimizer->SetFunction(*getMultiGenFcn()); + _fcn->initMinimizer(*_minimizer); _minimizer->SetVariables(_config.ParamsSettings().begin(), _config.ParamsSettings().end()); if (_cfg.setInitialCovariance) { diff --git a/roofit/roofitcore/src/RooMinimizerFcn.cxx b/roofit/roofitcore/src/RooMinimizerFcn.cxx index 0ce06a1016c17..d5b3f7d08a7e2 100644 --- a/roofit/roofitcore/src/RooMinimizerFcn.cxx +++ b/roofit/roofitcore/src/RooMinimizerFcn.cxx @@ -179,4 +179,9 @@ RooArgSet RooMinimizerFcn::freezeDisconnectedParameters() const return changedSet; } +void RooMinimizerFcn::initMinimizer(ROOT::Math::Minimizer &minim) +{ + minim.SetFunction(*_multiGenFcn); +} + /// \endcond diff --git a/roofit/roofitcore/src/RooMinimizerFcn.h b/roofit/roofitcore/src/RooMinimizerFcn.h index 6ec402b3bb635..f05009aba68f7 100644 --- a/roofit/roofitcore/src/RooMinimizerFcn.h +++ b/roofit/roofitcore/src/RooMinimizerFcn.h @@ -35,13 +35,14 @@ class RooMinimizerFcn : public RooAbsMinimizerFcn { public: RooMinimizerFcn(RooAbsReal *funct, RooMinimizer *context); + void initMinimizer(ROOT::Math::Minimizer &) override; + std::string getFunctionName() const override; std::string getFunctionTitle() const override; void setOptimizeConstOnFunction(RooAbsArg::ConstOpCode 
opcode, bool doAlsoTrackingOpt) override; void setOffsetting(bool flag) override; - ROOT::Math::IMultiGenFunction *getMultiGenFcn() override { return _multiGenFcn.get(); } double operator()(const double *x) const; void evaluateGradient(const double *x, double *out) const; diff --git a/roofit/roofitcore/src/TestStatistics/LikelihoodGradientJob.cxx b/roofit/roofitcore/src/TestStatistics/LikelihoodGradientJob.cxx index 95e0cd68011ee..58ae110730382 100644 --- a/roofit/roofitcore/src/TestStatistics/LikelihoodGradientJob.cxx +++ b/roofit/roofitcore/src/TestStatistics/LikelihoodGradientJob.cxx @@ -12,6 +12,7 @@ #include "LikelihoodGradientJob.h" +#include #include "RooFit/MultiProcess/JobManager.h" #include "RooFit/MultiProcess/Messenger.h" #include "RooFit/MultiProcess/ProcessTimer.h" @@ -21,7 +22,6 @@ #include "RooMinimizer.h" #include "Minuit2/Minuit2Minimizer.h" - #include "Minuit2/MnStrategy.h" namespace RooFit { @@ -39,14 +39,8 @@ LikelihoodGradientJob::LikelihoodGradientJob(std::shared_ptr likelihood offsets_previous_ = shared_offset_.offsets(); } -void LikelihoodGradientJob::synchronizeParameterSettings( +void LikelihoodGradientJob::synchronizeParameterSettingsImpl( const std::vector ¶meter_settings) -{ - LikelihoodGradientWrapper::synchronizeParameterSettings(parameter_settings); -} - -void LikelihoodGradientJob::synchronizeParameterSettings( - ROOT::Math::IMultiGenFunction * /*function*/, const std::vector ¶meter_settings) { gradf_.SetInitialGradient(parameter_settings, grad_); } diff --git a/roofit/roofitcore/src/TestStatistics/LikelihoodGradientJob.h b/roofit/roofitcore/src/TestStatistics/LikelihoodGradientJob.h index 35a73eeb2ff02..b54f61606cdeb 100644 --- a/roofit/roofitcore/src/TestStatistics/LikelihoodGradientJob.h +++ b/roofit/roofitcore/src/TestStatistics/LikelihoodGradientJob.h @@ -43,10 +43,7 @@ class LikelihoodGradientJob : public MultiProcess::Job, public LikelihoodGradien private: void run_derivator(unsigned int i_component) const; - void 
synchronizeParameterSettings(ROOT::Math::IMultiGenFunction *function, - const std::vector ¶meter_settings) override; - // this overload must also be overridden here so that the one above doesn't trigger a overloaded-virtual warning: - void synchronizeParameterSettings(const std::vector ¶meter_settings) override; + void synchronizeParameterSettingsImpl(const std::vector ¶meter_settings) override; void synchronizeWithMinimizer(const ROOT::Math::MinimizerOptions &options) override; void setStrategy(int istrat); diff --git a/roofit/roofitcore/src/TestStatistics/LikelihoodGradientWrapper.cxx b/roofit/roofitcore/src/TestStatistics/LikelihoodGradientWrapper.cxx index f3bc1ab22d87f..d85a25eb75fae 100644 --- a/roofit/roofitcore/src/TestStatistics/LikelihoodGradientWrapper.cxx +++ b/roofit/roofitcore/src/TestStatistics/LikelihoodGradientWrapper.cxx @@ -55,7 +55,7 @@ void LikelihoodGradientWrapper::synchronizeWithMinimizer(const ROOT::Math::Minim void LikelihoodGradientWrapper::synchronizeParameterSettings( const std::vector ¶meter_settings) { - synchronizeParameterSettings(minimizer_->getMultiGenFcn(), parameter_settings); + synchronizeParameterSettingsImpl(parameter_settings); } void LikelihoodGradientWrapper::updateMinuitInternalParameterValues(const std::vector & /*minuit_internal_x*/) diff --git a/roofit/roofitcore/src/TestStatistics/MinuitFcnGrad.cxx b/roofit/roofitcore/src/TestStatistics/MinuitFcnGrad.cxx index 6565da13c6bbe..ab8fe58483f05 100644 --- a/roofit/roofitcore/src/TestStatistics/MinuitFcnGrad.cxx +++ b/roofit/roofitcore/src/TestStatistics/MinuitFcnGrad.cxx @@ -17,6 +17,9 @@ #include "RooAbsPdf.h" #include "RooNaNPacker.h" +#include +#include + #include // std::setprecision namespace RooFit { @@ -24,34 +27,36 @@ namespace TestStatistics { namespace { -class MinuitGradFunctor : public ROOT::Math::IMultiGradFunction { - +class MinuitGradFunctor : public ROOT::Minuit2::FCNBase { public: - MinuitGradFunctor(MinuitFcnGrad const &fcn) : _fcn{fcn} {} - - 
ROOT::Math::IMultiGradFunction *Clone() const override { return new MinuitGradFunctor(_fcn); } - - unsigned int NDim() const override { return _fcn.getNDim(); } + MinuitGradFunctor(MinuitFcnGrad const &fcn, double errorLevel) : _fcn{fcn}, _up{errorLevel} {} - void Gradient(const double *x, double *grad) const override { return _fcn.Gradient(x, grad); } - - void GradientWithPrevResult(const double *x, double *grad, double *previous_grad, double *previous_g2, - double *previous_gstep) const override + double operator()(std::vector const &v) const override { return _fcn(v.data()); } + double Up() const override { return _up; } + void SetErrorDef(double val) override { _up = val; } + bool HasGradient() const override { return true; } + std::vector Gradient(std::vector const ¶ms) const override { - return _fcn.GradientWithPrevResult(x, grad, previous_grad, previous_g2, previous_gstep); + std::vector grad(_fcn.getNDim()); + _fcn.Gradient(params.data(), grad.data()); + return grad; } - - bool returnsInMinuit2ParameterSpace() const override { return _fcn.returnsInMinuit2ParameterSpace(); } - -private: - double DoEval(const double *x) const override { return _fcn(x); } - - double DoDerivative(double const * /*x*/, unsigned int /*icoord*/) const override + std::vector GradientWithPrevResult(std::vector const &v, double *previous_grad, double *previous_g2, + double *previous_gstep) const override + { + std::vector output(v.size()); + _fcn.GradientWithPrevResult(v.data(), output.data(), previous_grad, previous_g2, previous_gstep); + return output; + } + ROOT::Minuit2::GradientParameterSpace gradParameterSpace() const override { - throw std::runtime_error("MinuitGradFunctor::DoDerivative is not implemented, please use Gradient instead."); + return _fcn.returnsInMinuit2ParameterSpace() ? 
ROOT::Minuit2::GradientParameterSpace::Internal + : ROOT::Minuit2::GradientParameterSpace::External; } +private: MinuitFcnGrad const &_fcn; + double _up; }; } // namespace @@ -83,10 +88,7 @@ class MinuitGradFunctor : public ROOT::Math::IMultiGradFunction { MinuitFcnGrad::MinuitFcnGrad(const std::shared_ptr &absL, RooMinimizer *context, std::vector ¶meters, LikelihoodMode likelihoodMode, LikelihoodGradientMode likelihoodGradientMode) - : RooAbsMinimizerFcn(*absL->getParameters(), context), - _minuitInternalX(getNDim(), 0), - _minuitExternalX(getNDim(), 0), - _multiGenFcn{std::make_unique(*this)} + : RooAbsMinimizerFcn(*absL->getParameters(), context), _minuitInternalX(getNDim(), 0), _minuitExternalX(getNDim(), 0) { synchronizeParameterSettings(parameters, true); @@ -108,7 +110,7 @@ MinuitFcnGrad::MinuitFcnGrad(const std::shared_ptrsynchronizeParameterSettings(getMultiGenFcn(), parameters); + _gradient->synchronizeParameterSettingsImpl(parameters); // Note: can be different than RooGradMinimizerFcn/LikelihoodGradientSerial, where default options are passed // (ROOT::Math::MinimizerOptions::DefaultStrategy() and ROOT::Math::MinimizerOptions::DefaultErrorDef()) @@ -274,5 +276,11 @@ bool MinuitFcnGrad::Synchronize(std::vector ¶m return returnee; } +void MinuitFcnGrad::initMinimizer(ROOT::Math::Minimizer &minim) +{ + auto &minuit = dynamic_cast(minim); + minuit.SetFCN(getNDim(), std::make_unique(*this, minim.ErrorDef())); +} + } // namespace TestStatistics } // namespace RooFit diff --git a/roofit/roofitcore/src/TestStatistics/MinuitFcnGrad.h b/roofit/roofitcore/src/TestStatistics/MinuitFcnGrad.h index dd4d72d3ed733..138364e8079f7 100644 --- a/roofit/roofitcore/src/TestStatistics/MinuitFcnGrad.h +++ b/roofit/roofitcore/src/TestStatistics/MinuitFcnGrad.h @@ -34,19 +34,18 @@ class MinuitFcnGrad : public RooAbsMinimizerFcn { std::vector ¶meters, LikelihoodMode likelihoodMode, LikelihoodGradientMode likelihoodGradientMode); + void initMinimizer(ROOT::Math::Minimizer &) 
override; + /// Overridden from RooAbsMinimizerFcn to include gradient strategy synchronization. bool Synchronize(std::vector ¶meter_settings) override; - // used inside Minuit: - inline bool returnsInMinuit2ParameterSpace() const { return _gradient->usesMinuitInternalValues(); } + bool returnsInMinuit2ParameterSpace() const { return _gradient->usesMinuitInternalValues(); } inline void setOptimizeConstOnFunction(RooAbsArg::ConstOpCode opcode, bool doAlsoTrackingOpt) override { applyToLikelihood([&](auto &l) { l.constOptimizeTestStatistic(opcode, doAlsoTrackingOpt); }); } - ROOT::Math::IMultiGenFunction *getMultiGenFcn() override { return _multiGenFcn.get(); } - double operator()(const double *x) const; /// IMultiGradFunction overrides necessary for Minuit @@ -94,8 +93,6 @@ class MinuitFcnGrad : public RooAbsMinimizerFcn { mutable bool offsets_reset_ = false; void syncOffsets() const; - std::unique_ptr _multiGenFcn; - mutable bool _minuitInternalRooFitXMismatch = false; }; From b19f6bfb3a5e20e0a5a275345b809a8a4cec0a97 Mon Sep 17 00:00:00 2001 From: Jonas Rembser Date: Sat, 4 Oct 2025 11:52:16 +0200 Subject: [PATCH 4/7] [Math] Don't leak Minuit 2 implementation details to IFunction classes The option to return external gradients in Minuit 2 internal space is only used for Minuit 2, and these flags should not be present in math function base classes. The same is true for the interface to calculate the gradient considering a previous result. --- math/mathcore/inc/Math/IFunction.h | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/math/mathcore/inc/Math/IFunction.h b/math/mathcore/inc/Math/IFunction.h index ad7b65c24e655..bc0b029d4bceb 100644 --- a/math/mathcore/inc/Math/IFunction.h +++ b/math/mathcore/inc/Math/IFunction.h @@ -91,8 +91,6 @@ namespace ROOT { // if it inherits from ROOT::Math::IGradientFunctionMultiDim. 
virtual bool HasGradient() const { return false; } - virtual bool returnsInMinuit2ParameterSpace() const { return false; } - /// Evaluate all the vector of function derivatives (gradient) at a point x. /// Derived classes must re-implement it if more efficient than evaluating one at a time virtual void Gradient(const T *x, T *grad) const @@ -103,17 +101,6 @@ namespace ROOT { } } - /// In some cases, the gradient algorithm will use information from the previous step, these can be passed - /// in with this overload. The `previous_*` arrays can also be used to return second derivative and step size - /// so that these can be passed forward again as well at the call site, if necessary. - virtual void GradientWithPrevResult(const T *x, T *grad, T *previous_grad, T *previous_g2, T *previous_gstep) const - { - unsigned int ndim = NDim(); - for (unsigned int icoord = 0; icoord < ndim; ++icoord) { - grad[icoord] = Derivative(x, icoord, previous_grad, previous_g2, previous_gstep); - } - } - /// Optimized method to evaluate at the same time the function value and derivative at a point x. /// Often both value and derivatives are needed and it is often more efficient to compute them at the same time. /// Derived class should implement this method if performances play an important role and if it is faster to From e8707edcbebd5377f3831706d6c3747a8322f957 Mon Sep 17 00:00:00 2001 From: Jonas Rembser Date: Sat, 4 Oct 2025 11:58:08 +0200 Subject: [PATCH 5/7] [Minuit2] Merge FCNAdapter with -GradAdapter and use only std::function The base classes `FCNBase` and `FCNGradientBase` were already unified in 71806b3, and the Minuit code can be further simplified by merging also the derived adapter class for `std::function` objects. This will also make it easier for the users, because they then don't have to use different classes depending on whether they pass external gradients or not. We also make the new `FCNAdapter` purely `std::function` based, instead of using a template parameter.
This makes it possible to use Minuit2 directly from Python, as we can benefit from the `std::function` pythonization. It also makes the Minuit2 code more consistent, because `std::function` was already used for the G2 (second derivatives) and the Hessian. --- math/minuit2/CMakeLists.txt | 1 - math/minuit2/inc/Minuit2/FCNAdapter.h | 80 +++++++++++-- math/minuit2/inc/Minuit2/FCNBase.h | 4 + math/minuit2/inc/Minuit2/FCNGradAdapter.h | 135 ---------------------- math/minuit2/src/CMakeLists.txt | 1 - math/minuit2/src/Minuit2Minimizer.cxx | 16 +-- 6 files changed, 86 insertions(+), 151 deletions(-) delete mode 100644 math/minuit2/inc/Minuit2/FCNGradAdapter.h diff --git a/math/minuit2/CMakeLists.txt b/math/minuit2/CMakeLists.txt index 87dfb34003a35..9ebf3f7e888d9 100644 --- a/math/minuit2/CMakeLists.txt +++ b/math/minuit2/CMakeLists.txt @@ -26,7 +26,6 @@ if(CMAKE_PROJECT_NAME STREQUAL ROOT) Minuit2/ExternalInternalGradientCalculator.h Minuit2/FCNAdapter.h Minuit2/FCNBase.h - Minuit2/FCNGradAdapter.h Minuit2/FCNGradientBase.h Minuit2/FumiliBuilder.h Minuit2/FumiliChi2FCN.h diff --git a/math/minuit2/inc/Minuit2/FCNAdapter.h b/math/minuit2/inc/Minuit2/FCNAdapter.h index 5aa4ae7d1742a..2ffd0b012fd3a 100644 --- a/math/minuit2/inc/Minuit2/FCNAdapter.h +++ b/math/minuit2/inc/Minuit2/FCNAdapter.h @@ -15,6 +15,7 @@ #include #include +#include namespace ROOT { @@ -23,7 +24,7 @@ namespace Minuit2 { /** -template wrapped class for adapting to FCNBase signature +Wrapper class for std::function objects, adapting to FCNBase signature. @author Lorenzo Moneta @@ -31,21 +32,86 @@ template wrapped class for adapting to FCNBase signature */ -template class FCNAdapter : public FCNBase { public: - FCNAdapter(const Function &f, double up = 1.)
: fUp(up), fFunc(std::move(f)) {} + + bool HasGradient() const override { return bool(fGradFunc); } + bool HasG2() const override { return bool(fG2Func); } + bool HasHessian() const override { return bool(fHessianFunc); } + + double operator()(std::vector const &v) const override { return fFunc(v.data()); } - double operator()(std::vector const& v) const override { return fFunc.operator()(&v[0]); } - double operator()(const double *v) const { return fFunc.operator()(v); } double Up() const override { return fUp; } + std::vector Gradient(std::vector const &v) const override + { + std::vector output(v.size()); + fGradFunc(v.data(), output.data()); + return output; + } + + /// return second derivatives (diagonal of the Hessian matrix) + std::vector G2(std::vector const &x) const override + { + std::vector output; + if (fG2Func) + return fG2Func(x); + if (fHessianFunc) { + std::size_t n = x.size(); + output.resize(n); + if (fHessian.empty()) + fHessian.resize(n * n); + fHessianFunc(x, fHessian.data()); + if (!fHessian.empty()) { + // get diagonal element of h + for (unsigned int i = 0; i < n; i++) + output[i] = fHessian[i * n + i]; + } + } + return output; + } + + /// compute Hessian. 
Return Hessian as a std::vector of size(n*n) + std::vector Hessian(std::vector const &x) const override + { + std::vector output; + if (fHessianFunc) { + std::size_t n = x.size(); + output.resize(n * n); + bool ret = fHessianFunc(x, output.data()); + if (!ret) { + output.clear(); + fHessianFunc = nullptr; + } + } + + return output; + } + + void SetGradientFunction(std::function f) { fGradFunc = std::move(f); } + void SetG2Function(std::function(std::vector const &)> f) { fG2Func = std::move(f); } + void SetHessianFunction(std::function const &, double *)> f) + { + fHessianFunc = std::move(f); + } + void SetErrorDef(double up) override { fUp = up; } private: - const Function &fFunc; - double fUp; + using Function = std::function; + using GradFunction = std::function; + using G2Function = std::function(std::vector const &)>; + using HessianFunction = std::function const &, double *)>; + + double fUp = 1.; + mutable std::vector fHessian; + + Function fFunc; + GradFunction fGradFunc; + G2Function fG2Func; + mutable HessianFunction fHessianFunc; }; } // end namespace Minuit2 diff --git a/math/minuit2/inc/Minuit2/FCNBase.h b/math/minuit2/inc/Minuit2/FCNBase.h index 444ad7b8d7609..bc84c79f2aa22 100644 --- a/math/minuit2/inc/Minuit2/FCNBase.h +++ b/math/minuit2/inc/Minuit2/FCNBase.h @@ -113,6 +113,10 @@ class FCNBase { virtual bool HasGradient() const { return false; } virtual std::vector Gradient(std::vector const&) const { return {}; } + + /// In some cases, the gradient algorithm will use information from the previous step, these can be passed + /// in with this overload. The `previous_*` arrays can also be used to return second derivative and step size + /// so that these can be passed forward again as well at the call site, if necessary. 
virtual std::vector GradientWithPrevResult(std::vector const& parameters, double * /*previous_grad*/, double * /*previous_g2*/, double * /*previous_gstep*/) const { diff --git a/math/minuit2/inc/Minuit2/FCNGradAdapter.h b/math/minuit2/inc/Minuit2/FCNGradAdapter.h deleted file mode 100644 index 5acaafdaddbbf..0000000000000 --- a/math/minuit2/inc/Minuit2/FCNGradAdapter.h +++ /dev/null @@ -1,135 +0,0 @@ -// @(#)root/minuit2:$Id$ -// Author: L. Moneta 10/2006 - -/********************************************************************** - * * - * Copyright (c) 2006 ROOT Foundation, CERN/PH-SFT * - * * - **********************************************************************/ - -#ifndef ROOT_Minuit2_FCNGradAdapter -#define ROOT_Minuit2_FCNGradAdapter - -#include "Minuit2/FCNBase.h" -#include "Minuit2/MnPrint.h" - -#include -#include - -namespace ROOT { - -namespace Minuit2 { - -/** - - -template wrapped class for adapting to FCNBase signature a IGradFunction - -@author Lorenzo Moneta - -@ingroup Minuit - -*/ - -template -class FCNGradAdapter : public FCNBase { - -public: - FCNGradAdapter(const Function &f, double up = 1.) 
: fFunc(f), fUp(up), fGrad(std::vector(fFunc.NDim())) {} - - bool HasGradient() const override { return true; } - - double operator()(std::vector const& v) const override { return fFunc.operator()(&v[0]); } - double operator()(const double *v) const { return fFunc.operator()(v); } - - double Up() const override { return fUp; } - - std::vector Gradient(std::vector const& v) const override - { - fFunc.Gradient(&v[0], &fGrad[0]); - return fGrad; - } - std::vector GradientWithPrevResult(std::vector const& v, double *previous_grad, double *previous_g2, - double *previous_gstep) const override - { - fFunc.GradientWithPrevResult(&v[0], &fGrad[0], previous_grad, previous_g2, previous_gstep); - return fGrad; - } - - GradientParameterSpace gradParameterSpace() const override { - if (fFunc.returnsInMinuit2ParameterSpace()) { - return GradientParameterSpace::Internal; - } else { - return GradientParameterSpace::External; - } - } - - /// return second derivatives (diagonal of the Hessian matrix) - std::vector G2(std::vector const& x) const override { - if (fG2Func) - return fG2Func(x); - if (fHessianFunc) { - unsigned int n = fFunc.NDim(); - if (fG2Vec.empty() ) fG2Vec.resize(n); - if (fHessian.empty() ) fHessian.resize(n*n); - fHessianFunc(x,fHessian.data()); - if (!fHessian.empty()) { - // get diagonal element of h - for (unsigned int i = 0; i < n; i++) - fG2Vec[i] = fHessian[i*n+i]; - } - else fG2Vec.clear(); - } - else - if (!fG2Vec.empty()) fG2Vec.clear(); - return fG2Vec; - } - - /// compute Hessian. 
Return Hessian as a std::vector of size(n*n) - std::vector Hessian(std::vector const& x ) const override { - unsigned int n = fFunc.NDim(); - if (fHessianFunc) { - if (fHessian.empty() ) fHessian.resize(n * n); - bool ret = fHessianFunc(x,fHessian.data()); - if (!ret) { - fHessian.clear(); - fHessianFunc = nullptr; - } - } else { - fHessian.clear(); - } - - return fHessian; - } - - bool HasG2() const override { - return bool(fG2Func); - } - bool HasHessian() const override { - return bool(fHessianFunc); - } - - template - void SetG2Function(Func f) { fG2Func = f;} - - template - void SetHessianFunction(Func f) { fHessianFunc = f;} - - void SetErrorDef(double up) override { fUp = up; } - -private: - const Function &fFunc; - double fUp; - mutable std::vector fGrad; - mutable std::vector fHessian; - mutable std::vector fG2Vec; - - std::function(std::vector const& )> fG2Func; - mutable std::function const& , double *)> fHessianFunc; -}; - -} // end namespace Minuit2 - -} // end namespace ROOT - -#endif // ROOT_Minuit2_FCNGradAdapter diff --git a/math/minuit2/src/CMakeLists.txt b/math/minuit2/src/CMakeLists.txt index 332fd4706d016..4233480c2ea2a 100644 --- a/math/minuit2/src/CMakeLists.txt +++ b/math/minuit2/src/CMakeLists.txt @@ -16,7 +16,6 @@ set(MINUIT2_HEADERS ExternalInternalGradientCalculator.h FCNAdapter.h FCNBase.h - FCNGradAdapter.h FCNGradientBase.h FumiliBuilder.h FumiliChi2FCN.h diff --git a/math/minuit2/src/Minuit2Minimizer.cxx b/math/minuit2/src/Minuit2Minimizer.cxx index 975f1bab412fc..0bb724ecffbf3 100644 --- a/math/minuit2/src/Minuit2Minimizer.cxx +++ b/math/minuit2/src/Minuit2Minimizer.cxx @@ -19,7 +19,6 @@ #include "Minuit2/FCNAdapter.h" #include "Minuit2/FumiliFCNAdapter.h" -#include "Minuit2/FCNGradAdapter.h" #include "Minuit2/FunctionMinimum.h" #include "Minuit2/MnMigrad.h" #include "Minuit2/MnMinos.h" @@ -375,11 +374,14 @@ void Minuit2Minimizer::SetFunction(const ROOT::Math::IMultiGenFunction &func) fDim = func.NDim(); const bool hasGrad = 
func.HasGradient(); if (!fUseFumili) { - if (hasGrad) - fMinuitFCN = std::make_unique>( - dynamic_cast(func), ErrorDef()); - else - fMinuitFCN = std::make_unique>(func, ErrorDef()); + auto lambdaFunc = [&func](double const *params) { return func(params); }; + auto adapter = std::make_unique(lambdaFunc, ErrorDef()); + if (hasGrad) { + auto const &gradFunc = dynamic_cast(func); + auto lambdaGrad = [&gradFunc](double const *params, double *grad) { return gradFunc.Gradient(params, grad); }; + adapter->SetGradientFunction(lambdaGrad); + } + fMinuitFCN = std::move(adapter); return; } if (hasGrad) { @@ -409,7 +411,7 @@ void Minuit2Minimizer::SetHessianFunction(std::function *>(fMinuitFCN.get()); + auto fcn = static_cast(fMinuitFCN.get()); if (!fcn) return; fcn->SetHessianFunction(hfunc); } From 64d8c20759555c2f460edfd435a0839f308c9a82 Mon Sep 17 00:00:00 2001 From: Jonas Rembser Date: Fri, 3 Oct 2025 20:38:48 +0200 Subject: [PATCH 6/7] [Minuit2] Improve FCNBase and FCNAdapter documentation --- math/minuit2/inc/Minuit2/FCNAdapter.h | 113 +++++++++++++---- math/minuit2/inc/Minuit2/FCNBase.h | 171 ++++++++++++-------------- 2 files changed, 166 insertions(+), 118 deletions(-) diff --git a/math/minuit2/inc/Minuit2/FCNAdapter.h b/math/minuit2/inc/Minuit2/FCNAdapter.h index 2ffd0b012fd3a..d1725a6d0a30b 100644 --- a/math/minuit2/inc/Minuit2/FCNAdapter.h +++ b/math/minuit2/inc/Minuit2/FCNAdapter.h @@ -17,34 +17,63 @@ #include #include -namespace ROOT { - -namespace Minuit2 { - -/** - - -Wrapper class for std::function objects, adapting to FCNBase signature. - -@author Lorenzo Moneta - -@ingroup Minuit - -*/ - +namespace ROOT::Minuit2 { + +/// Adapter class to wrap user-provided functions into the FCNBase interface. +/// +/// This class allows users to supply their function (and optionally its gradient, +/// diagonal second derivatives, and Hessian) in the form of `std::function` objects. 
+/// It adapts these functions so that they can be used transparently with the MINUIT +/// minimizers via the `FCNBase` interface. +/// +/// Typical usage: +/// - Pass the function to minimize to the constructor. +/// - Optionally set the gradient, G2 (second derivative diagonal), or Hessian +/// functions using the provided setter methods. +/// - MINUIT will then query these functions if available, or fall back to numerical +/// approximations if they are not provided. +/// +/// \ingroup Minuit class FCNAdapter : public FCNBase { public: + /// Construct an adapter around a user-provided function. + /// + /// @param f Function to minimize. It must take a pointer to the parameter array + /// (`double const*`) and return the function value. + /// @param up Error definition parameter (defaults to 1.0). FCNAdapter(std::function f, double up = 1.) : fUp(up), fFunc(std::move(f)) {} + /// Indicate whether an analytic gradient has been provided. + /// + /// @return `true` if a gradient function was set, otherwise `false`. bool HasGradient() const override { return bool(fGradFunc); } + + /// Indicate whether analytic second derivatives (diagonal of the Hessian) are available. + /// + /// @return `true` if a G2 function or a Hessian function has been set, otherwise `false`. bool HasG2() const override { return bool(fG2Func); } + + /// Indicate whether an analytic Hessian has been provided. + /// + /// @return `true` if a Hessian function was set, otherwise `false`. bool HasHessian() const override { return bool(fHessianFunc); } + /// Evaluate the function at the given parameter vector. + /// + /// @param v Parameter vector. + /// @return Function value at the specified parameters. double operator()(std::vector const &v) const override { return fFunc(v.data()); } + /// Return the error definition parameter (`up`). + /// + /// @return Current error definition value. 
double Up() const override { return fUp; } + /// Evaluate the gradient of the function at the given parameter vector. + /// + /// @param v Parameter vector. + /// @return Gradient vector (∂f/∂xᵢ) at the specified parameters. std::vector Gradient(std::vector const &v) const override { std::vector output(v.size()); @@ -52,7 +81,13 @@ class FCNAdapter : public FCNBase { return output; } - /// return second derivatives (diagonal of the Hessian matrix) + /// Return the diagonal elements of the Hessian (second derivatives). + /// + /// If a G2 function is set, it is used directly. If only a Hessian function + /// is available, the diagonal is extracted from the full Hessian. + /// + /// @param x Parameter vector. + /// @return Vector of second derivatives (one per parameter). std::vector G2(std::vector const &x) const override { std::vector output; @@ -65,7 +100,7 @@ class FCNAdapter : public FCNBase { fHessian.resize(n * n); fHessianFunc(x, fHessian.data()); if (!fHessian.empty()) { - // get diagonal element of h + // Extract diagonal elements of Hessian for (unsigned int i = 0; i < n; i++) output[i] = fHessian[i * n + i]; } @@ -73,7 +108,13 @@ class FCNAdapter : public FCNBase { return output; } - /// compute Hessian. Return Hessian as a std::vector of size(n*n) + /// Return the full Hessian matrix. + /// + /// If a Hessian function is available, it is used to fill the matrix. + /// If the Hessian function fails, it is cleared and not used again. + /// + /// @param x Parameter vector. + /// @return Flattened Hessian matrix in row-major order. std::vector Hessian(std::vector const &x) const override { std::vector output; @@ -90,13 +131,33 @@ class FCNAdapter : public FCNBase { return output; } + /// Set the analytic gradient function. + /// + /// @param f Gradient function of type `void(double const*, double*)`. + /// The first argument is the parameter array, the second is + /// the output array for the gradient values. 
void SetGradientFunction(std::function f) { fGradFunc = std::move(f); } + + /// Set the function providing diagonal second derivatives (G2). + /// + /// @param f Function taking a parameter vector and returning the + /// diagonal of the Hessian matrix as a vector. void SetG2Function(std::function(std::vector const &)> f) { fG2Func = std::move(f); } + + /// Set the function providing the full Hessian matrix. + /// + /// @param f Function of type `bool(std::vector const&, double*)`. + /// The first argument is the parameter vector, the second is + /// the output buffer (flattened matrix). The return value + /// should be `true` on success, `false` on failure. void SetHessianFunction(std::function const &, double *)> f) { fHessianFunc = std::move(f); } + /// Update the error definition parameter. + /// + /// @param up New error definition value. void SetErrorDef(double up) override { fUp = up; } private: @@ -105,17 +166,15 @@ class FCNAdapter : public FCNBase { using G2Function = std::function(std::vector const &)>; using HessianFunction = std::function const &, double *)>; - double fUp = 1.; - mutable std::vector fHessian; + double fUp = 1.; ///< Error definition parameter. + mutable std::vector fHessian; ///< Storage for intermediate Hessian values. - Function fFunc; - GradFunction fGradFunc; - G2Function fG2Func; - mutable HessianFunction fHessianFunc; + Function fFunc; ///< Wrapped function to minimize. + GradFunction fGradFunc; ///< Optional gradient function. + G2Function fG2Func; ///< Optional diagonal second-derivative function. + mutable HessianFunction fHessianFunc; ///< Optional Hessian function. 
}; -} // end namespace Minuit2 - -} // end namespace ROOT +} // namespace ROOT::Minuit2 #endif // ROOT_Minuit2_FCNAdapter diff --git a/math/minuit2/inc/Minuit2/FCNBase.h b/math/minuit2/inc/Minuit2/FCNBase.h index bc84c79f2aa22..245c02ac05a6c 100644 --- a/math/minuit2/inc/Minuit2/FCNBase.h +++ b/math/minuit2/inc/Minuit2/FCNBase.h @@ -16,130 +16,119 @@ #include -namespace ROOT { +namespace ROOT::Minuit2 { -namespace Minuit2 { - -/** - - \defgroup Minuit Minuit2 Minimization Library - - New Object-oriented implementation of the MINUIT minimization package. - More information is available at the home page of the \ref Minuit2Page "Minuit2" minimization package". - - \ingroup Math -*/ +/// \defgroup Minuit Minuit2 Minimization Library +/// +/// New Object-oriented implementation of the MINUIT minimization package. +/// More information is available at the home page of the \ref Minuit2Page "Minuit2" minimization package. +/// +/// \ingroup Math enum class GradientParameterSpace { - External, Internal + External, + Internal }; -//______________________________________________________________________________ -/** - - -Interface (abstract class) defining the function to be minimized, which has to be implemented by the user. - -@author Fred James and Matthias Winkler; modified by Andras Zsenei and Lorenzo Moneta - -\ingroup Minuit - - */ +/// Interface (abstract class) defining the function to be minimized, which has to be implemented by the user. +/// +/// \ingroup Minuit class FCNBase { public: - virtual ~FCNBase() = default; - /** - - The meaning of the vector of parameters is of course defined by the user, - who uses the values of those parameters to calculate their function Value. - The order and the position of these parameters is strictly the one specified
The starting - values must be specified by the user, either via an std::vector or the - MnUserParameters supplied as input to the MINUIT minimizers such as - VariableMetricMinimizer or MnMigrad. Later values are determined by MINUIT - as it searches for the Minimum or performs whatever analysis is requested by - the user. - - @param v function parameters as defined by the user. - - @return the Value of the function. - - @see MnUserParameters - @see VariableMetricMinimizer - @see MnMigrad - - */ + /// The meaning of the vector of parameters is of course defined by the user, + /// who uses the values of those parameters to calculate their function Value. + /// The order and the position of these parameters is strictly the one specified + /// by the user when supplying the starting values for minimization. The starting + /// values must be specified by the user, either via an std::vector or the + /// MnUserParameters supplied as input to the MINUIT minimizers such as + /// VariableMetricMinimizer or MnMigrad. Later values are determined by MINUIT + /// as it searches for the Minimum or performs whatever analysis is requested by + /// the user. + /// + /// @param v function parameters as defined by the user. + /// + /// @return the Value of the function. + /// + /// @see MnUserParameters + /// @see VariableMetricMinimizer + /// @see MnMigrad virtual double operator()(std::vector const &v) const = 0; - /** - - Error definition of the function. MINUIT defines Parameter errors as the - change in Parameter Value required to change the function Value by up. Normally, - for chisquared fits it is 1, and for negative log likelihood, its Value is 0.5. - If the user wants instead the 2-sigma errors for chisquared fits, it becomes 4, - as Chi2(x+n*sigma) = Chi2(x) + n*n. - - Comment a little bit better with links!!!!!!!!!!!!!!!!! - - */ + /// Error definition of the function. 
MINUIT defines Parameter errors as the + /// change in Parameter Value required to change the function Value by up. Normally, + /// for chisquared fits it is 1, and for negative log likelihood, its Value is 0.5. + /// If the user wants instead the 2-sigma errors for chisquared fits, it becomes 4, + /// as Chi2(x+n*sigma) = Chi2(x) + n*n. + /// + /// Comment a little bit better with links!!!!!!!!!!!!!!!!! virtual double ErrorDef() const { return Up(); } - /** - - Error definition of the function. MINUIT defines Parameter errors as the - change in Parameter Value required to change the function Value by up. Normally, - for chisquared fits it is 1, and for negative log likelihood, its Value is 0.5. - If the user wants instead the 2-sigma errors for chisquared fits, it becomes 4, - as Chi2(x+n*sigma) = Chi2(x) + n*n. - - \todo Comment a little bit better with links!!!!!!!!!!!!!!!!! Idem for ErrorDef() - - */ + /// Error definition of the function. MINUIT defines Parameter errors as the + /// change in Parameter Value required to change the function Value by up. Normally, + /// for chisquared fits it is 1, and for negative log likelihood, its Value is 0.5. + /// If the user wants instead the 2-sigma errors for chisquared fits, it becomes 4, + /// as Chi2(x+n*sigma) = Chi2(x) + n*n. + /// + /// \todo Comment a little bit better with links!!!!!!!!!!!!!!!!! Idem for ErrorDef() virtual double Up() const = 0; - /** - add interface to set dynamically a new error definition - Re-implement this function if needed. - */ - virtual void SetErrorDef(double){}; + /// add interface to set dynamically a new error definition + /// Re-implement this function if needed. + virtual void SetErrorDef(double) {}; virtual bool HasGradient() const { return false; } - virtual std::vector Gradient(std::vector const&) const { return {}; } - - /// In some cases, the gradient algorithm will use information from the previous step, these can be passed - /// in with this overload. 
The `previous_*` arrays can also be used to return second derivative and step size - /// so that these can be passed forward again as well at the call site, if necessary. - virtual std::vector GradientWithPrevResult(std::vector const& parameters, double * /*previous_grad*/, + /// Return the gradient vector of the function at the given parameter point. + /// + /// By default, returns an empty vector (no analytic gradient provided). + /// Override this method if an analytic gradient is available. + /// + /// @param v Parameter vector. + /// @return Gradient vector with respect to the parameters. + virtual std::vector Gradient(std::vector const &) const { return {}; } + + /// \warning Not meant to be overridden! This is a requirement for an + /// internal optimization in RooFit that might go away with any refactoring. + virtual std::vector GradientWithPrevResult(std::vector const ¶meters, double * /*previous_grad*/, double * /*previous_g2*/, double * /*previous_gstep*/) const { return Gradient(parameters); }; - virtual GradientParameterSpace gradParameterSpace() const { - return GradientParameterSpace::External; - }; - - /// return second derivatives (diagonal of the Hessian matrix) - virtual std::vector G2(std::vector const&) const { return {};} - - /// return Hessian - virtual std::vector Hessian(std::vector const&) const { return {};} + /// \warning Not meant to be overridden! This is a requirement for an + /// internal optimization in RooFit that might go away with any refactoring. + virtual GradientParameterSpace gradParameterSpace() const { return GradientParameterSpace::External; }; + + /// Return the diagonal elements of the Hessian (second derivatives). + /// + /// By default, returns an empty vector. Override this method if analytic second derivatives + /// (per-parameter curvature) are available. + /// + /// @param v Parameter vector. + /// @return Vector of second derivatives with respect to each parameter. 
+ virtual std::vector<double> G2(std::vector<double> const &) const { return {}; } + + /// Return the full Hessian matrix of the function. + /// + /// By default, returns an empty vector. Override this method if the full analytic Hessian + /// (matrix of second derivatives) is available. + /// + /// @param v Parameter vector. + /// @return Flattened Hessian matrix. + virtual std::vector<double> Hessian(std::vector<double> const &) const { return {}; } virtual bool HasHessian() const { return false; } virtual bool HasG2() const { return false; } }; -} // namespace Minuit2 - -} // namespace ROOT +} // namespace ROOT::Minuit2 #endif // ROOT_Minuit2_FCNBase From 22b91bb213591580ed9e56706f71f756e2939331 Mon Sep 17 00:00:00 2001 From: Jonas Rembser Date: Fri, 3 Oct 2025 20:49:11 +0200 Subject: [PATCH 7/7] [RelNotes] Mention Minuit 2 interface changes related to FCN adapters --- README/ReleaseNotes/v638/index.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README/ReleaseNotes/v638/index.md b/README/ReleaseNotes/v638/index.md index a6c8e7e7ab670..c64be00f0da4f 100644 --- a/README/ReleaseNotes/v638/index.md +++ b/README/ReleaseNotes/v638/index.md @@ -70,6 +70,11 @@ The following people have contributed to this new version: ### Minuit2 * Behavior change: building ROOT using `minuit2_omp=ON` option no longer enables OpenMP parallelization by default. One has to call now additionally GradientCalculator::SetParallelOMP(). +* The functionality of the `FCNGradAdapter` got absorbed into the `FCNAdapter`. This also means that the `FCNGradAdapter.h` header is gone. +* The `ROOT::Minuit2::FCNAdapter` is no longer templated and now handles only functions with these exact signatures: + * `double(double const *)` for the wrapped function + * `void(double const *, double *)` for the gradient + The advantage of this restriction is that the `FCNAdapter` can now also wrap any Python function that respects this interface. ## RooFit