Fix MPI bug (#21960)
dschwen committed Aug 30, 2022
1 parent d5e6a7b commit d364de5
Showing 2 changed files with 15 additions and 14 deletions.
3 changes: 3 additions & 0 deletions framework/include/userobject/RadialAverage.h
@@ -117,6 +117,9 @@ class RadialAverage : public ElementUserObject
   std::vector<std::set<std::size_t>> _communication_lists;
   bool _update_communication_lists;
 
+  /// processors to send (potentially empty) data to
+  std::vector<processor_id_type> _candidate_procs;
+
   processor_id_type _my_pid;
 
   //@{ PerfGraph identifiers
26 changes: 12 additions & 14 deletions framework/src/userobject/RadialAverage.C
@@ -123,24 +123,18 @@ RadialAverage::finalize()
       libMesh::n_threads() > 1)
     updateCommunicationLists();
 
-  // sparse send data (processor ID,)
-  std::vector<std::size_t> non_zero_comm;
-  for (auto i = beginIndex(_communication_lists); i < _communication_lists.size(); ++i)
-    if (!_communication_lists[i].empty())
-      non_zero_comm.push_back(i);
-
   // data structures for sparse point to point communication
-  std::vector<std::vector<QPData>> send(non_zero_comm.size());
-  std::vector<Parallel::Request> send_requests(non_zero_comm.size());
+  std::vector<std::vector<QPData>> send(_candidate_procs.size());
+  std::vector<Parallel::Request> send_requests(_candidate_procs.size());
   Parallel::MessageTag send_tag = _communicator.get_unique_tag(4711);
   std::vector<QPData> receive;
 
   const auto item_type = TIMPI::StandardType<QPData>(&(_qp_data[0]));
 
   // fill buffer and send structures
-  for (auto i = beginIndex(non_zero_comm); i < non_zero_comm.size(); ++i)
+  for (const auto i : index_range(_candidate_procs))
   {
-    const auto pid = non_zero_comm[i];
+    const auto pid = _candidate_procs[i];
     const auto & list = _communication_lists[pid];
 
     // fill send buffer for transfer to pid
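To see why the buffers above are now sized by _candidate_procs rather than by the non-empty communication lists, here is a minimal raw-MPI sketch of the send side. It is illustrative only: RadialAverage itself goes through libMesh's Parallel/TIMPI wrappers, and candidate_ranks, send_bufs, and the plain double payload are assumptions of the example. The point is that one request is posted per candidate rank, and an empty buffer still produces a zero-length message, so the number of messages in flight is known on both ends.

#include <mpi.h>
#include <vector>

// Post one non-blocking send per candidate rank. Empty buffers are sent as
// zero-length messages so the receiver can still count one message per candidate.
std::vector<MPI_Request> post_candidate_sends(const std::vector<int> & candidate_ranks,
                                              const std::vector<std::vector<double>> & send_bufs,
                                              int tag,
                                              MPI_Comm comm)
{
  std::vector<MPI_Request> requests(candidate_ranks.size());
  for (std::size_t i = 0; i < candidate_ranks.size(); ++i)
    MPI_Isend(send_bufs[i].data(), static_cast<int>(send_bufs[i].size()), MPI_DOUBLE,
              candidate_ranks[i], tag, comm, &requests[i]);
  return requests;
}

Posting the possibly-empty sends is what makes the receive count in the next hunk deterministic.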
@@ -153,8 +147,12 @@ RadialAverage::finalize()
   }
 
   // receive messages - we assume that we receive as many messages as we send!
-  for (auto i = beginIndex(non_zero_comm); i < non_zero_comm.size(); ++i)
+  // bounding box overlap is symmetric, but data exchange between overlapping procs could still
+  // be unidirectional!
+  for (const auto i : index_range(_candidate_procs))
   {
+    libmesh_ignore(i);
+
     // inspect incoming message
     Parallel::Status status(_communicator.probe(Parallel::any_source, send_tag));
     const auto source_pid = TIMPI::cast_int<processor_id_type>(status.source());
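The receive loop above runs exactly once per candidate processor; libmesh_ignore(i) only silences the unused-variable warning, since the loop index itself is never needed. A hedged raw-MPI sketch of the same pattern (again not the actual libMesh/TIMPI calls, and the double payload is an assumption): exactly n_candidates probes on MPI_ANY_SOURCE. Before this fix the loop bound came from the number of non-empty send lists, which under-counts incoming messages whenever a neighbouring processor sends data here without getting any back.

#include <mpi.h>
#include <vector>

// Receive exactly one (possibly empty) message from each candidate rank.
// Because bounding-box overlap is symmetric, every rank that sends to us also
// has us in its candidate list, so n_candidates equals the number of incoming
// messages -- independent of how much data actually flows in either direction.
std::vector<std::vector<double>> receive_from_candidates(std::size_t n_candidates,
                                                         int tag,
                                                         MPI_Comm comm)
{
  std::vector<std::vector<double>> received;
  for (std::size_t i = 0; i < n_candidates; ++i)
  {
    MPI_Status status;
    MPI_Probe(MPI_ANY_SOURCE, tag, comm, &status);   // block until any candidate's message arrives

    int count = 0;
    MPI_Get_count(&status, MPI_DOUBLE, &count);      // zero-length messages are fine

    std::vector<double> buf(count);
    MPI_Recv(buf.data(), count, MPI_DOUBLE, status.MPI_SOURCE, tag, comm, MPI_STATUS_IGNORE);
    received.push_back(std::move(buf));
  }
  return received;
}

In a standalone program the requests returned from the send side would still need an MPI_Waitall before the send buffers are destroyed.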
@@ -279,14 +277,14 @@ RadialAverage::updateCommunicationLists()
     bbs.emplace_back(pp.first - rpoint, pp.second + rpoint);
 
   // get candidate processors (overlapping bounding boxes)
-  std::vector<processor_id_type> candidate_procs;
+  _candidate_procs.clear();
   for (const auto pid : index_range(bbs))
     if (pid != _my_pid && bbs[pid].intersects(mypp))
-      candidate_procs.push_back(pid);
+      _candidate_procs.push_back(pid);
 
   // go over all boundary data items and send them to the proc they overlap with
   for (const auto i : _boundary_data_indices)
-    for (const auto pid : candidate_procs)
+    for (const auto pid : _candidate_procs)
       if (bbs[pid].contains_point(_qp_data[i]._q_point))
         _communication_lists[pid].insert(i);
 
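The correctness of counting candidates hinges on bounding-box intersection being symmetric: if this processor's inflated box overlaps a neighbour's, the neighbour's overlaps ours too, so both ends put each other on their candidate lists even when data ultimately flows only one way. Below is a minimal sketch of that candidate construction with a hypothetical Box type and raw MPI; the real code uses libMesh bounding boxes and the gathered processor extents in bbs.

#include <mpi.h>
#include <vector>

struct Box
{
  double min[3], max[3];

  // Axis-aligned box intersection; symmetric by construction.
  bool intersects(const Box & o) const
  {
    for (int d = 0; d < 3; ++d)
      if (max[d] < o.min[d] || o.max[d] < min[d])
        return false;
    return true;
  }
};

// Gather every rank's (already radius-inflated) box and keep the ranks whose
// box overlaps ours. Each pair of overlapping ranks lists each other, so the
// candidate count doubles as the expected number of incoming messages.
std::vector<int> build_candidates(const Box & my_box, MPI_Comm comm)
{
  int rank, size;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);

  std::vector<Box> all_boxes(size);
  MPI_Allgather(&my_box, static_cast<int>(sizeof(Box)), MPI_BYTE,
                all_boxes.data(), static_cast<int>(sizeof(Box)), MPI_BYTE, comm);

  std::vector<int> candidates;
  for (int pid = 0; pid < size; ++pid)
    if (pid != rank && all_boxes[pid].intersects(my_box))
      candidates.push_back(pid);
  return candidates;
}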

0 comments on commit d364de5
