Skip to content

Commit

Permalink
List-mode objective function: split caching in separate functions
Browse files Browse the repository at this point in the history
Functionality is identical, but the reading code now lives in a separate function.
This prepares for calling that function even when caching is not used.
  • Loading branch information
KrisThielemans committed May 9, 2024
1 parent feb6d85 commit 2270c16
Show file tree
Hide file tree
Showing 2 changed files with 149 additions and 137 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
/*
Copyright (C) 2003- 2011, Hammersmith Imanet Ltd
Copyright (C) 2015, Univ. of Leeds
Copyright (C) 2016, 2022 UCL
Copyright (C) 2016, 2022, 2024 UCL
Copyright (C) 2021, University of Pennsylvania
SPDX-License-Identifier: Apache-2.0
Expand Down Expand Up @@ -148,12 +148,21 @@ class PoissonLogLikelihoodWithLinearModelForMeanAndListModeDataWithProjMatrixByB
bool skip_balanced_subsets;

private:
//! Cache of the listmode file
//! Cache of the current "batch" in the listmode file
/*! \todo Move this higher-up in the hierarchy as it doesn't depend on ProjMatrixByBin
*/
std::vector<BinAndCorr> record_cache;

//! This function caches the listmode file, or reads it. It is run during set_up()
//! This function reads the next "batch" of data from the listmode file.
/*!
This function keeps reading from the current position in the list-mode data and stores
prompt events and their additive terms in \c record_cache.
\return \c true if there are no more events to read, \c false otherwise
\todo Move this function higher-up in the hierarchy as it doesn't depend on ProjMatrixByBin
*/
bool load_listmode_batch();
//! This function caches the list-mode batches to file. It is run during set_up()
/*! \todo Move this function higher-up in the hierarchy as it doesn't depend on ProjMatrixByBin
*/
Succeeded cache_listmode_file();
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
/*
Copyright (C) 2003- 2011, Hammersmith Imanet Ltd
Copyright (C) 2014, 2016, 2018, 2022 University College London
Copyright (C) 2014, 2016, 2018, 2022, 2024 University College London
Copyright (C) 2016, University of Hull
Copyright (C) 2021, University of Pennsylvania
This file is part of STIR.
Expand Down Expand Up @@ -438,132 +438,104 @@ PoissonLogLikelihoodWithLinearModelForMeanAndListModeDataWithProjMatrixByBin<Tar
}

template <typename TargetT>
Succeeded
PoissonLogLikelihoodWithLinearModelForMeanAndListModeDataWithProjMatrixByBin<TargetT>::cache_listmode_file()
bool
PoissonLogLikelihoodWithLinearModelForMeanAndListModeDataWithProjMatrixByBin<TargetT>::load_listmode_batch()
{
if (!this->recompute_cache && this->cache_lm_file)
record_cache.clear();
try
{
warning("Looking for existing cache files such as \"" + this->get_cache_filename(0) + "\".\n"
+ "We will be ignoring any time frame definitions as well as num_events_to_use!");
// find how many cache files there are
this->num_cache_files = 0;
while (true)
{
if (!FilePath::exists(this->get_cache_filename(this->num_cache_files)))
break;
++this->num_cache_files;
}
if (!this->num_cache_files)
error("No cache files found.");
return Succeeded::yes; // Stop here!!!
record_cache.reserve(this->cache_size);
}

this->num_cache_files = 0;
if (this->cache_lm_file)
catch (...)
{
info("Listmode reconstruction: Creating cache...", 2);
error("Listmode: cannot allocate cache for " + std::to_string(this->cache_size) + " records. Reduce cache size.");
}

record_cache.clear();
try
const shared_ptr<ListRecord> record_sptr = this->list_mode_data_sptr->get_empty_record_sptr();

const double start_time = this->frame_defs.get_start_time(this->current_frame_num);
const double end_time = this->frame_defs.get_end_time(this->current_frame_num);
double current_time = 0.;
unsigned long int cached_events = 0;

bool stop_caching = false;

while (true) // Start for the current cache
{
if (this->list_mode_data_sptr->get_next_record(*record_sptr) == Succeeded::no)
{
record_cache.reserve(this->cache_size);
stop_caching = true;
break;
}
catch (...)
if (record_sptr->is_time())
{
error("Listmode: cannot allocate cache for " + std::to_string(this->cache_size) + " records. Reduce cache size.");
current_time = record_sptr->time().get_time_in_secs();
if (this->do_time_frame && current_time >= end_time)
{
stop_caching = true;
break; // get out of while loop
}
}

this->list_mode_data_sptr->reset();
const shared_ptr<ListRecord> record_sptr = this->list_mode_data_sptr->get_empty_record_sptr();

const double start_time = this->frame_defs.get_start_time(this->current_frame_num);
const double end_time = this->frame_defs.get_end_time(this->current_frame_num);
double current_time = 0.;
unsigned long int cached_events = 0;

bool stop_caching = false;
record_cache.reserve(this->cache_size);

while (true) // keep caching across multiple files.
if (current_time < start_time)
continue; // skip
if (record_sptr->is_event() && record_sptr->event().is_prompt())
{
record_cache.clear();

while (true) // Start for the current cache
BinAndCorr tmp;
tmp.my_bin.set_bin_value(1.0);
record_sptr->event().get_bin(tmp.my_bin, *this->proj_data_info_sptr);

if (tmp.my_bin.get_bin_value() != 1.0f || tmp.my_bin.segment_num() < this->proj_data_info_sptr->get_min_segment_num()
|| tmp.my_bin.segment_num() > this->proj_data_info_sptr->get_max_segment_num()
|| tmp.my_bin.tangential_pos_num() < this->proj_data_info_sptr->get_min_tangential_pos_num()
|| tmp.my_bin.tangential_pos_num() > this->proj_data_info_sptr->get_max_tangential_pos_num()
|| tmp.my_bin.axial_pos_num() < this->proj_data_info_sptr->get_min_axial_pos_num(tmp.my_bin.segment_num())
|| tmp.my_bin.axial_pos_num() > this->proj_data_info_sptr->get_max_axial_pos_num(tmp.my_bin.segment_num())
|| tmp.my_bin.timing_pos_num() < this->proj_data_info_sptr->get_min_tof_pos_num()
|| tmp.my_bin.timing_pos_num() > this->proj_data_info_sptr->get_max_tof_pos_num())
{
if (this->list_mode_data_sptr->get_next_record(*record_sptr) == Succeeded::no)
{
stop_caching = true;
break;
}
if (record_sptr->is_time())
{
current_time = record_sptr->time().get_time_in_secs();
if (this->do_time_frame && current_time >= end_time)
{
stop_caching = true;
break; // get out of while loop
}
}
if (current_time < start_time)
continue;
if (record_sptr->is_event() && record_sptr->event().is_prompt())
{
BinAndCorr tmp;
tmp.my_bin.set_bin_value(1.0);
record_sptr->event().get_bin(tmp.my_bin, *this->proj_data_info_sptr);

if (tmp.my_bin.get_bin_value() != 1.0f
|| tmp.my_bin.segment_num() < this->proj_data_info_sptr->get_min_segment_num()
|| tmp.my_bin.segment_num() > this->proj_data_info_sptr->get_max_segment_num()
|| tmp.my_bin.tangential_pos_num() < this->proj_data_info_sptr->get_min_tangential_pos_num()
|| tmp.my_bin.tangential_pos_num() > this->proj_data_info_sptr->get_max_tangential_pos_num()
|| tmp.my_bin.axial_pos_num() < this->proj_data_info_sptr->get_min_axial_pos_num(tmp.my_bin.segment_num())
|| tmp.my_bin.axial_pos_num() > this->proj_data_info_sptr->get_max_axial_pos_num(tmp.my_bin.segment_num())
|| tmp.my_bin.timing_pos_num() < this->proj_data_info_sptr->get_min_tof_pos_num()
|| tmp.my_bin.timing_pos_num() > this->proj_data_info_sptr->get_max_tof_pos_num())
{
continue;
}
try
{
record_cache.push_back(tmp);
++cached_events;
}
catch (...)
{
// should never get here due to `reserve` statement above, but best to check...
error("Listmode: running out of memory for cache. Current size: "
+ std::to_string(this->record_cache.size()) + " records");
}
continue;
}
try
{
record_cache.push_back(tmp);
++cached_events;
}
catch (...)
{
// should never get here due to `reserve` statement above, but best to check...
error("Listmode: running out of memory for cache. Current size: " + std::to_string(this->record_cache.size())
+ " records");
}

if (record_cache.size() > 1 && record_cache.size() % 500000L == 0)
info(boost::format("Cached Prompt Events (this cache): %1% ") % record_cache.size());
if (record_cache.size() > 1 && record_cache.size() % 500000L == 0)
info(boost::format("Read Prompt Events (this batch): %1% ") % record_cache.size(), 3);

if (this->num_events_to_use > 0)
if (cached_events >= static_cast<std::size_t>(this->num_events_to_use))
{
stop_caching = true;
break;
}
if (this->num_events_to_use > 0)
if (cached_events >= static_cast<std::size_t>(this->num_events_to_use))
{
stop_caching = true;
break;
}

if (record_cache.size() == this->cache_size)
break; // cache is full. go to next cache.
}
}
if (record_cache.size() == this->cache_size)
break; // cache is full.
}
}
info(boost::format("Loaded %1% prompts from list-mode file") % cached_events, 2);

// add additive term to current cache
if (this->has_add)
{
info(boost::format("Caching Additive corrections for : %1% events.") % record_cache.size());
// add additive term to current cache
if (this->has_add)
{
info(boost::format("Caching Additive corrections for : %1% events.") % record_cache.size(), 2);

#ifdef STIR_OPENMP
# pragma omp parallel
{
{
# pragma omp single
{
info("Caching add background with " + std::to_string(omp_get_num_threads()) + " threads", 2);
}
}
{
info("Caching add background with " + std::to_string(omp_get_num_threads()) + " threads", 2);
}
}
#endif

#ifdef STIR_OPENMP
Expand All @@ -573,46 +545,77 @@ PoissonLogLikelihoodWithLinearModelForMeanAndListModeDataWithProjMatrixByBin<Tar
# pragma omp parallel for collapse(2) schedule(dynamic)
# endif
#endif
for (int seg = this->additive_proj_data_sptr->get_min_segment_num();
seg <= this->additive_proj_data_sptr->get_max_segment_num();
++seg)
for (int timing_pos_num = this->additive_proj_data_sptr->get_min_tof_pos_num();
timing_pos_num <= this->additive_proj_data_sptr->get_max_tof_pos_num();
++timing_pos_num)
{
const auto segment(this->additive_proj_data_sptr->get_segment_by_view(seg, timing_pos_num));
for (int seg = this->additive_proj_data_sptr->get_min_segment_num();
seg <= this->additive_proj_data_sptr->get_max_segment_num();
++seg)
for (int timing_pos_num = this->additive_proj_data_sptr->get_min_tof_pos_num();
timing_pos_num <= this->additive_proj_data_sptr->get_max_tof_pos_num();
++timing_pos_num)
{
const auto segment(this->additive_proj_data_sptr->get_segment_by_view(seg, timing_pos_num));

for (BinAndCorr& cur_bin : record_cache)
{
if (cur_bin.my_bin.segment_num() == seg)
{
for (BinAndCorr& cur_bin : record_cache)
{
if (cur_bin.my_bin.segment_num() == seg)
{
#ifdef STIR_OPENMP
# if _OPENMP >= 201012
# pragma omp atomic write
# else
# pragma omp critical(PLogLikListModePMBAddSinoCaching)
# endif
#endif
cur_bin.my_corr = segment[cur_bin.my_bin.view_num()][cur_bin.my_bin.axial_pos_num()]
[cur_bin.my_bin.tangential_pos_num()];
}
}
cur_bin.my_corr
= segment[cur_bin.my_bin.view_num()][cur_bin.my_bin.axial_pos_num()][cur_bin.my_bin.tangential_pos_num()];
}
} // end additive correction
}
}
} // end additive correction
return stop_caching;
}

if (write_listmode_cache_file(this->num_cache_files) == Succeeded::no)
{
error("Error writing cache file!");
}
template <typename TargetT>
Succeeded
PoissonLogLikelihoodWithLinearModelForMeanAndListModeDataWithProjMatrixByBin<TargetT>::cache_listmode_file()
{
if (!this->recompute_cache && this->cache_lm_file)
{
warning("Looking for existing cache files such as \"" + this->get_cache_filename(0) + "\".\n"
+ "We will be ignoring any time frame definitions as well as num_events_to_use!");
// find how many cache files there are
this->num_cache_files = 0;
while (true)
{
if (!FilePath::exists(this->get_cache_filename(this->num_cache_files)))
break;
++this->num_cache_files;
}
if (!this->num_cache_files)
error("No cache files found.");
return Succeeded::yes; // Stop here!!!
}

if (stop_caching)
break;
assert(this->cache_lm_file);

this->num_cache_files = 0;
this->list_mode_data_sptr->reset();

while (true)
{
info("Listmode reconstruction: Creating cache...", 2);

bool stop_caching = this->load_listmode_batch();

if (write_listmode_cache_file(this->num_cache_files) == Succeeded::no)
{
error("Error writing cache file!");
}
info(boost::format("Cached Events: %1% ") % cached_events);
return Succeeded::yes;
++this->num_cache_files;

if (stop_caching)
break;
}
return Succeeded::no;
return Succeeded::yes;
}

template <typename TargetT>
Expand Down

0 comments on commit 2270c16

Please sign in to comment.