Skip to content

Commit

Permalink
LM obj-fun: add value() and tests
Browse files Browse the repository at this point in the history
- compute_objective_function() is now implemented as well
- Tests are (almost) a duplicate of the ProjData version, but
  gradient-test is disabled as for this sample data it is too slow
  • Loading branch information
KrisThielemans committed May 13, 2024
1 parent 9f45458 commit dcf76a0
Show file tree
Hide file tree
Showing 5 changed files with 371 additions and 21 deletions.
23 changes: 21 additions & 2 deletions documentation/release_6.1.htm
Original file line number Diff line number Diff line change
Expand Up @@ -57,9 +57,15 @@ <h3>New functionality</h3>
Read TOF bin order from interfile header (see <a href=https://github.com/UCL/STIR/pull/1389> PR #1389</a>)
</li>
<li>
<code>GeneralisedObjectiveFunction</code> has 2 new members to compute the full gradient
<code>PoissonLogLikelihoodWithLinearModelForMeanAndListModeDataWithProjMatrixByBin</code> can now compute the value
as well as <code>accumulate_Hessian_times_input</code>.
<br>
<a href=https://github.com/UCL/STIR/pull/1418> PR #1418</a>
</li>
<li>
<code>GeneralisedObjectiveFunction</code> has 2 new members to compute the full gradient
(<code>compute_gradient</code> and <code>compute_gradient_without_penalty</code>). Previously,
only subset gradients were available.
only subset gradients were available.
<br>
<a href=https://github.com/UCL/STIR/pull/1418> PR #1418</a>
</li>
Expand All @@ -68,6 +74,12 @@ <h3>New functionality</h3>

<h3>Changed functionality</h3>
<ul>
<li>
<code>PoissonLogLikelihoodWithLinearModelForMeanAndListModeDataWithProjMatrixByBin</code> now computes the gradient
multi-threaded (if OpenMP is enabled), even if caching to file of the list-mode file is not enabled.
<br>
<a href=https://github.com/UCL/STIR/pull/1418> PR #1418</a>
</li>
<li>
Accumulation in computation of priors now uses doubles, which could result in slightly better precision. <a href=https://github.com/UCL/STIR/pull/1410>Part of PR #1410</a>.
</li>
Expand Down Expand Up @@ -136,6 +148,13 @@ <h4>recon_test_pack changes</h4>
</ul>

<h4>C++ tests</h4>
<ul>
<li>
objective function and priors now have a numerical test for <code>accumulate_Hessian_times_input</code>
<br>
<a href=https://github.com/UCL/STIR/pull/1418> PR #1418</a>
</li>
</ul>

</body>

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,13 @@ class PoissonLogLikelihoodWithLinearModelForMeanAndListModeDataWithProjMatrixByB

PoissonLogLikelihoodWithLinearModelForMeanAndListModeDataWithProjMatrixByBin();

//! Computes the value of the objective function at the \a current_estimate.
/*!
\warning If <code>add_sensitivity = false</code> and <code>use_subset_sensitivities = false</code>, this function will return an error
because the value would not be correct. Try <code>use_subset_sensitivities = true</code>.
*/
double actual_compute_objective_function_without_penalty(const TargetT& current_estimate, const int subset_num) override;

//! Computes the gradient of the objective function at the \a current_estimate overwriting \a gradient.
/*!
\warning If <code>add_sensitivity = false</code> and <code>use_subset_sensitivities = false</code> will return an error
Expand Down Expand Up @@ -107,13 +114,6 @@ class PoissonLogLikelihoodWithLinearModelForMeanAndListModeDataWithProjMatrixByB
#endif

protected:
/*! \todo this function is not implemented yet and currently calls error() */
double actual_compute_objective_function_without_penalty(const TargetT& current_estimate, const int subset_num) override
{ // TODO
error("compute_objective_function_without_penalty Not implemented yet");
return 0;
}

Succeeded set_up_before_sensitivity(shared_ptr<const TargetT> const& target_sptr) override;

void add_subset_sensitivity(TargetT& sensitivity, const int subset_num) const override;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -751,13 +751,14 @@ constexpr float max_quotient = 10000.F;
\sum_e A_e^t (y_e/(A_e lambda+ c))
*/
template <bool do_gradient, bool do_value>
inline void
LM_gradient(DiscretisedDensity<3, float>& output_image,
const ProjMatrixElemsForOneBin& row,
const float add_term,
const Bin& measured_bin,
const DiscretisedDensity<3, float>& input_image,
double* value_ptr)
LM_gradient_and_value(DiscretisedDensity<3, float>& output_image,
const ProjMatrixElemsForOneBin& row,
const float add_term,
const Bin& measured_bin,
const DiscretisedDensity<3, float>& input_image,
double* value_ptr)
{
Bin fwd_bin = measured_bin;
fwd_bin.set_bin_value(0.0f);
Expand All @@ -767,18 +768,22 @@ LM_gradient(DiscretisedDensity<3, float>& output_image,
if (measured_bin.get_bin_value() > max_quotient * fwd)
{
// cancel singularity
if (value_ptr)
if (do_value)
{
assert(value_ptr);
const auto num = measured_bin.get_bin_value();
*value_ptr -= num * log(double(num / max_quotient));
return;
}
}
const auto measured_div_fwd = measured_bin.get_bin_value() / fwd;
if (do_gradient)
{
const auto measured_div_fwd = measured_bin.get_bin_value() / fwd;

fwd_bin.set_bin_value(measured_div_fwd);
row.back_project(output_image, fwd_bin);
if (value_ptr)
fwd_bin.set_bin_value(measured_div_fwd);
row.back_project(output_image, fwd_bin);
}
if (do_value)
*value_ptr -= measured_bin.get_bin_value() * log(double(fwd));
}

Expand Down Expand Up @@ -835,7 +840,7 @@ LM_gradient_distributable_computation(const shared_ptr<ProjMatrixByBin> PM_sptr,
has_add,
accumulate,
value_ptr,
LM_gradient);
LM_gradient_and_value<true, false>);
}

void
Expand Down Expand Up @@ -865,6 +870,45 @@ LM_Hessian_distributable_computation(const shared_ptr<ProjMatrixByBin> PM_sptr,
H_func);
}

template <typename TargetT>
double
PoissonLogLikelihoodWithLinearModelForMeanAndListModeDataWithProjMatrixByBin<
    TargetT>::actual_compute_objective_function_without_penalty(const TargetT& current_estimate, const int subset_num)
{
  assert(subset_num >= 0);
  assert(subset_num < this->num_subsets);
  // Without per-subset sensitivities we cannot subtract the correct sensitivity term
  // for a single subset, so refuse to continue (matches the gradient computation's check).
  if (!this->get_use_subset_sensitivities() && this->num_subsets > 1)
    error("PoissonLogLikelihoodWithLinearModelForMeanAndListModeDataWithProjMatrixByBin::"
          "actual_compute_objective_function_without_penalty(): cannot subtract subset sensitivity because "
          "use_subset_sensitivities is false. This will result in an error in the value computation.");

  // Accumulate the event-dependent part of the value over all list-mode batches.
  // load_listmode_batch() returns true once the last batch has been loaded.
  double accum = 0.;
  unsigned int icache = 0;
  while (true)
    {
      const bool stop = this->load_listmode_batch(icache);
      LM_distributable_computation(this->PM_sptr,
                                   this->proj_data_info_sptr,
                                   nullptr, // no gradient output needed, value only
                                   &current_estimate,
                                   this->record_cache,
                                   subset_num,
                                   this->num_subsets,
                                   this->has_add,
                                   /* accumulate */ true,
                                   &accum,
                                   LM_gradient_and_value<false, true>);
      ++icache;
      if (stop)
        break;
    }
  // Fold in the sensitivity term. BUG FIX: the previous code discarded the return value of
  // std::inner_product (it has no side effects on its 'init' argument), so the sensitivity
  // contribution was silently dropped from the returned value.
  accum = std::inner_product(current_estimate.begin_all_const(),
                             current_estimate.end_all_const(),
                             this->get_subset_sensitivity(subset_num).begin_all_const(),
                             accum);
  return accum;
}

template <typename TargetT>
void
PoissonLogLikelihoodWithLinearModelForMeanAndListModeDataWithProjMatrixByBin<
Expand Down
4 changes: 4 additions & 0 deletions src/recon_test/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ set(${dir_INVOLVED_TEST_EXE_SOURCES}
recontest.cxx
test_data_processor_projectors.cxx
test_OSMAPOSL.cxx
test_PoissonLogLikelihoodWithLinearModelForMeanAndListModeWithProjMatrixByBin.cxx
)

if (DOWNLOAD_ZENODO_TEST_DATA)
Expand Down Expand Up @@ -128,6 +129,9 @@ include(stir_test_exe_targets)
# a test that uses MPI
create_stir_mpi_test(test_PoissonLogLikelihoodWithLinearModelForMeanAndProjData.cxx "${STIR_LIBRARIES}" $<TARGET_OBJECTS:stir_registries>)

# Involved test: needs the sample list-mode header shipped in the recon_test_pack.
# (lowercase add_test for consistency with the rest of this file's command style)
add_test(test_PoissonLogLikelihoodWithLinearModelForMeanAndListModeWithProjMatrixByBin
    test_PoissonLogLikelihoodWithLinearModelForMeanAndListModeWithProjMatrixByBin "${CMAKE_SOURCE_DIR}/recon_test_pack/PET_ACQ_small.l.hdr.STIR")

# fwdtest and bcktest could be useful on their own, so we'll add them to the installation targets
if (BUILD_TESTING)
install(TARGETS fwdtest bcktest DESTINATION bin)
Expand Down

0 comments on commit dcf76a0

Please sign in to comment.