Remove the broadcast and fix some comments
pengfej committed May 8, 2022
1 parent 581b253 commit d5cb7de
Showing 1 changed file with 36 additions and 21 deletions.
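
For orientation, the pattern this commit moves the writer to can be condensed into a short, self-contained program: rank 0 writes the header at offset 0, every rank locates its slot with an exclusive prefix sum over the payload sizes, and the last rank sends its end position to rank 0, which appends the footer there. This is an illustrative sketch, not the deal.II implementation: the file name "sketch.vtu" and the placeholder header/piece/footer strings are invented, every rank builds the header locally (so the header-size broadcast the real code keeps is unnecessary here), and the AssertThrowMPI error checking is omitted.

#include <mpi.h>

#include <cstdint>
#include <string>

int main(int argc, char *argv[])
{
  MPI_Init(&argc, &argv);
  int rank = 0, size = 1;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  MPI_File fh;
  MPI_File_open(MPI_COMM_WORLD,
                "sketch.vtu",
                MPI_MODE_CREATE | MPI_MODE_WRONLY,
                MPI_INFO_NULL,
                &fh);

  const std::string header = "<header>\n"; // placeholder
  const std::string piece  = "piece " + std::to_string(rank) + "\n";
  const std::string footer = "<footer>\n"; // placeholder

  // Rank 0 writes the header at the start of the file, i.e., offset 0.
  if (rank == 0)
    MPI_File_write_at(fh,
                      0,
                      header.c_str(),
                      static_cast<int>(header.size()),
                      MPI_CHAR,
                      MPI_STATUS_IGNORE);

  // Exclusive prefix sum of the per-rank payload sizes. The result is
  // undefined on rank 0, so initialize it to 0 there.
  const std::uint64_t size_on_proc = piece.size();
  std::uint64_t       prefix_sum   = 0;
  MPI_Exscan(&size_on_proc, &prefix_sum, 1, MPI_UINT64_T, MPI_SUM,
             MPI_COMM_WORLD);

  // Every rank knows the header size in this sketch, so no broadcast
  // is needed to compute the write offset.
  const MPI_Offset offset =
    static_cast<MPI_Offset>(header.size()) + prefix_sum;
  MPI_File_write_at_all(fh,
                        offset,
                        piece.c_str(),
                        static_cast<int>(piece.size()),
                        MPI_CHAR,
                        MPI_STATUS_IGNORE);

  // The last rank tells rank 0 where its data ends; rank 0 appends the
  // footer there. A point-to-point message replaces the broadcasts.
  std::uint64_t end = static_cast<std::uint64_t>(offset) + size_on_proc;
  if (size > 1 && rank == size - 1)
    MPI_Send(&end, 1, MPI_UINT64_T, 0, 0, MPI_COMM_WORLD);
  if (rank == 0)
    {
      if (size > 1)
        MPI_Recv(&end, 1, MPI_UINT64_T, size - 1, 0, MPI_COMM_WORLD,
                 MPI_STATUS_IGNORE);
      MPI_File_write_at(fh,
                        static_cast<MPI_Offset>(end),
                        footer.c_str(),
                        static_cast<int>(footer.size()),
                        MPI_CHAR,
                        MPI_STATUS_IGNORE);
    }

  MPI_File_close(&fh);
  MPI_Finalize();
  return 0;
}

Only rank 0 needs the end position, so a single MPI_Send/MPI_Recv pair between the last rank and rank 0 does the job of the broadcasts the old code issued, and no other rank has to participate in the exchange.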
57 changes: 36 additions & 21 deletions source/base/data_out_base.cc
@@ -7689,11 +7689,6 @@ DataOutInterface<dim, spacedim>::write_vtu_in_parallel(
#else

const int myrank = Utilities::MPI::this_mpi_process(comm);
int rank;
MPI_Comm_size(comm, &rank);
MPI_Offset offset;
std::uint64_t size_placeholder;

MPI_Info info;
int ierr = MPI_Info_create(&info);
AssertThrowMPI(ierr);
@@ -7711,16 +7706,20 @@ DataOutInterface<dim, spacedim>::write_vtu_in_parallel(
ierr = MPI_Info_free(&info);
AssertThrowMPI(ierr);

// Define offset and header size so we can broadcast and send them
// to specific processors.
unsigned int header_size;
MPI_Offset offset;
int size;
MPI_Comm_size(MPI_COMM_WORLD, &size);

// write header
if (myrank == 0)
{
std::stringstream ss;
DataOutBase::write_vtu_header(ss, vtk_flags);
header_size = ss.str().size();
// Write the header on rank 0 and automatically move the
// shared file pointer to the location after header;
// Write the header on rank 0 at the start of the file, i.e., at offset 0.
ierr = MPI_File_write_at(
fh, 0, ss.str().c_str(), header_size, MPI_CHAR, MPI_STATUS_IGNORE);
AssertThrowMPI(ierr);
@@ -7737,9 +7736,6 @@ DataOutInterface<dim, spacedim>::write_vtu_in_parallel(
// the first piece written. But if nobody has any pieces to write (file is
// empty), let processor 0 write their empty data, otherwise the vtk file is
// invalid.



std::stringstream ss;
if (my_n_patches > 0 || (global_n_patches == 0 && myrank == 0))
DataOutBase::write_vtu_main(patches,
@@ -7749,17 +7745,17 @@ DataOutInterface<dim, spacedim>::write_vtu_in_parallel(
ss);

// Use a prefix sum to find the specific offset each rank writes at.

const std::uint64_t size_on_proc = ss.str().size();
size_placeholder = size_on_proc;
std::uint64_t prefix_sum = 0;
ierr = MPI_Exscan(&size_on_proc, &prefix_sum, 1, MPI_UINT64_T, MPI_SUM, comm);
AssertThrowMPI(ierr);

// Broadcast the header size to all processors.
ierr = MPI_Bcast(&header_size, 1, MPI_UNSIGNED, 0, comm);
AssertThrowMPI(ierr);

offset = static_cast<MPI_Offset>(header_size) * sizeof(char) + prefix_sum;
// Compute the offset at which each processor writes.
offset = static_cast<MPI_Offset>(header_size) + prefix_sum;

ierr = MPI_File_write_at_all(fh,
offset,
@@ -7768,25 +7764,44 @@ DataOutInterface<dim, spacedim>::write_vtu_in_parallel(
MPI_CHAR,
MPI_STATUS_IGNORE);
AssertThrowMPI(ierr);

// Send the size_on_proc and offset of the last rank to rank 0, which
// writes the footer: offset + size_on_proc is where the footer should
// start.
if (myrank == size - 1)
{
ierr = MPI_Send(&size_on_proc, 1, MPI_UINT64_T, 0, 0, comm);
AssertThrowMPI(ierr);
ierr = MPI_Send(&offset, 1, MPI_UNSIGNED_LONG_LONG, 0, 0, comm);
AssertThrowMPI(ierr);
}
}

// write footer
if (myrank == rank - 1)
if (myrank == 0)
{
std::stringstream ss;
DataOutBase::write_vtu_footer(ss);
unsigned int footer_size = ss.str().size();

ierr = MPI_Bcast(&offset, 1, MPI_UNSIGNED_LONG_LONG, rank - 1, comm);
// Receive size_on_proc from the last rank.
std::uint64_t size_on_proc;
ierr = MPI_Recv(
&size_on_proc, 1, MPI_UINT64_T, size - 1, 0, comm, MPI_STATUS_IGNORE);
AssertThrowMPI(ierr);

// std::uint64_t size_on_proc;
// ierr = MPI_Bcast(&size_on_proc, 1, MPI_INT64_T, rank-1, comm);
ierr = MPI_Bcast(&size_placeholder, 1, MPI_INT64_T, rank - 1, comm);
// Receive the offset of the last rank.
ierr = MPI_Recv(&offset,
1,
MPI_UNSIGNED_LONG_LONG,
size - 1,
0,
comm,
MPI_STATUS_IGNORE);
AssertThrowMPI(ierr);

offset =
offset + static_cast<MPI_Offset>(size_placeholder) * sizeof(char);
// offset += size_on_proc;
// Compute the offset at which to write the footer.
offset = offset + static_cast<MPI_Offset>(size_on_proc);

// Write the footer.
ierr = MPI_File_write_at(
fh, offset, ss.str().c_str(), footer_size, MPI_CHAR, MPI_STATUS_IGNORE);
AssertThrowMPI(ierr);
}
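
As a worked example of the offset arithmetic (with invented numbers): suppose three ranks write 100, 250, and 50 bytes of piece data after a 300-byte header. MPI_Exscan over the sizes gives prefix sums of 0, 100, and 350 (the result on rank 0 is undefined, which is why prefix_sum is initialized to 0), so the ranks write at offsets 300, 400, and 650. The last rank then sends its offset 650 and size 50 to rank 0, which writes the footer starting at byte 700.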
