85 changes: 18 additions & 67 deletions 03-mpi-api/03-mpi-api.tex
@@ -71,61 +71,6 @@
\tableofcontents
\end{frame}

\section{Boost.MPI}

\begin{frame}{Boost.MPI}
Boost.MPI is a part of the Boost C++ libraries that provides C++ bindings for the Message Passing Interface (MPI).

Boost.MPI makes it easier to write distributed applications in C++ by wrapping the complex MPI API with C++-friendly abstractions, improving safety and reducing the amount of boilerplate code.

Key Features of Boost.MPI:
\begin{itemize}
\item Simplified use of MPI with C++ bindings.
\item Supports complex data types through Boost.Serialization.
\item Easier management of distributed tasks and communication.
\item Compatible with common MPI implementations like MPICH, OpenMPI, MS MPI, etc.
\end{itemize}

Note: C API mapping to Boost.MPI: \href{https://www.boost.org/doc/libs/1_86_0/doc/html/mpi/c_mapping.html}{link}

{\footnotesize For more details see Boost.MPI docs: \href{https://www.boost.org/doc/libs/1_86_0/doc/html/mpi.html}{link}}
\end{frame}

\begin{frame}[fragile]{Boost.MPI example}
\lstset{style=CStyle, caption=Hello World example with Boost MPI}
\begin{lstlisting}
#include <boost/mpi.hpp>
#include <iostream>

// Namespace alias for convenience
namespace mpi = boost::mpi;

int main(int argc, char* argv[]) {
// Initialize the MPI environment
mpi::environment env(argc, argv);
mpi::communicator world;

// Get the rank (ID) of the current process and the total number of processes
int rank = world.rank();
int size = world.size();

if (rank == 0) {
// If this is the root process (rank 0), send a message to another process
std::string message = "Hello from process 0";
world.send(1, 0, message); // Send to process 1
std::cout << "Process 0 sent: " << message << std::endl;
} else if (rank == 1) {
// If this is process 1, receive the message
std::string received_message;
world.recv(0, 0, received_message); // Receive from process 0
std::cout << "Process 1 received: " << received_message << std::endl;
}

return 0;
}
\end{lstlisting}
\end{frame}

\section{Advanced Send/Receive API}

\begin{frame}{Why \texttt{MPI\_Send} and \texttt{MPI\_Recv} Are Not Enough}
@@ -144,7 +89,6 @@ \section{Advanced Send/Receive API}
{
\footnotesize
\texttt{int MPI\_Isend(const void *buf, int count, MPI\_Datatype datatype, int dest, int tag, MPI\_Comm comm, MPI\_Request *request);} \\
\texttt{boost::mpi::request boost::mpi::communicator::isend(int dest, int tag, const T* values, int n);}
}

Parameters:
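Not part of this commit: a minimal C sketch of the non-blocking send described above, pairing \texttt{MPI\_Isend} with \texttt{MPI\_Wait}. The tag value 0 and the payload are illustrative, and the run is assumed to use at least two processes.

\lstset{style=CStyle, caption=Illustrative non-blocking send sketch}
\begin{lstlisting}
#include <mpi.h>
#include <stdio.h>

int main(int argc, char* argv[]) {
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if (rank == 0) {
    int data = 42;
    MPI_Request request;
    // Start the send and return immediately; the buffer must stay
    // valid until the request completes.
    MPI_Isend(&data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &request);
    // ... overlap useful computation here ...
    MPI_Wait(&request, MPI_STATUS_IGNORE);  // complete the send
  } else if (rank == 1) {
    int data;
    MPI_Recv(&data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    printf("rank 1 received %d\n", data);
  }

  MPI_Finalize();
  return 0;
}
\end{lstlisting}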
@@ -167,7 +111,6 @@
{
\footnotesize
\texttt{int MPI\_Irecv(void *buf, int count, MPI\_Datatype datatype, int source, int tag, MPI\_Comm comm, MPI\_Request *request);} \\
\texttt{boost::mpi::request boost::mpi::communicator::irecv(int source, int tag, T\& value);}
}

Parameters:
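Again a sketch rather than part of this diff: the matching non-blocking receive, posting \texttt{MPI\_Irecv} early and completing it with \texttt{MPI\_Wait} before the data is read.

\lstset{style=CStyle, caption=Illustrative non-blocking receive sketch}
\begin{lstlisting}
#include <mpi.h>
#include <stdio.h>

int main(int argc, char* argv[]) {
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if (rank == 1) {
    int data;
    MPI_Request request;
    // Post the receive before the message arrives.
    MPI_Irecv(&data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &request);
    // ... do useful work while the message is in flight ...
    MPI_Wait(&request, MPI_STATUS_IGNORE);  // data is valid only after this
    printf("rank 1 received %d\n", data);
  } else if (rank == 0) {
    int data = 42;
    MPI_Send(&data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
  }

  MPI_Finalize();
  return 0;
}
\end{lstlisting}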
@@ -210,7 +153,6 @@ \section{Synchronization}
{
\footnotesize
\texttt{int MPI\_Barrier(MPI\_Comm comm);} \\
\texttt{void boost::mpi::communicator::barrier();}
}

Usage:
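A minimal sketch (not from this commit) of \texttt{MPI\_Barrier} separating two phases of work; the printouts are only illustrative.

\lstset{style=CStyle, caption=Illustrative barrier sketch}
\begin{lstlisting}
#include <mpi.h>
#include <stdio.h>

int main(int argc, char* argv[]) {
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Each process finishes its own work first.
  printf("rank %d: phase 1 done\n", rank);

  // No process continues until every process has reached this point.
  MPI_Barrier(MPI_COMM_WORLD);

  printf("rank %d: phase 2 starts\n", rank);

  MPI_Finalize();
  return 0;
}
\end{lstlisting}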
@@ -252,7 +194,6 @@ \section{Collective operations}
{
\footnotesize
\texttt{int MPI\_Bcast(void *buffer, int count, MPI\_Datatype datatype, int root, MPI\_Comm comm);} \\
\texttt{void broadcast(const communicator\& comm, T\& value, int root);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
}

\begin{minipage}[t]{0.6\textwidth}
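Not part of this commit: a minimal \texttt{MPI\_Bcast} sketch in which rank 0 broadcasts a single integer to all ranks; the value 123 is illustrative.

\lstset{style=CStyle, caption=Illustrative broadcast sketch}
\begin{lstlisting}
#include <mpi.h>
#include <stdio.h>

int main(int argc, char* argv[]) {
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  int value = 0;
  if (rank == 0) value = 123;  // only the root has the data initially

  // After the call, every rank holds the root's value.
  MPI_Bcast(&value, 1, MPI_INT, 0, MPI_COMM_WORLD);

  printf("rank %d has value %d\n", rank, value);

  MPI_Finalize();
  return 0;
}
\end{lstlisting}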
@@ -282,7 +223,6 @@ \section{Collective operations}
{
\footnotesize
\texttt{int MPI\_Reduce(const void *sendbuf, void *recvbuf, int count, MPI\_Datatype datatype, MPI\_Op op, int root, MPI\_Comm comm);} \\
\texttt{void reduce(const communicator\& comm, const T\& in\_value, T\& out\_value, Op op, int root);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
}

\begin{minipage}[t]{0.2\textwidth}
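An illustrative \texttt{MPI\_Reduce} sketch (not from this diff): every rank contributes its rank number and the root receives the sum.

\lstset{style=CStyle, caption=Illustrative reduce sketch}
\begin{lstlisting}
#include <mpi.h>
#include <stdio.h>

int main(int argc, char* argv[]) {
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  int local = rank;  // each process contributes one value
  int sum = 0;       // meaningful only on the root after the call

  MPI_Reduce(&local, &sum, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

  if (rank == 0) printf("sum of ranks = %d\n", sum);

  MPI_Finalize();
  return 0;
}
\end{lstlisting}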
@@ -309,7 +249,6 @@ \section{Collective operations}
{
\footnotesize
\texttt{int MPI\_Gather(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, int root, MPI\_Comm comm);} \\
\texttt{void gather(const communicator\& comm, const T\& in\_value, std::vector<T>\& out\_values, int root);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
}

\begin{minipage}[t]{0.6\textwidth}
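Not part of this commit: a gather sketch in which each rank sends one integer and the root collects them into an array sized by the communicator.

\lstset{style=CStyle, caption=Illustrative gather sketch}
\begin{lstlisting}
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char* argv[]) {
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  int local = rank * 10;  // one element per process
  int* all = NULL;
  if (rank == 0) all = malloc(size * sizeof(int));  // only the root needs the buffer

  // recvcount is the count received from EACH process, not the total.
  MPI_Gather(&local, 1, MPI_INT, all, 1, MPI_INT, 0, MPI_COMM_WORLD);

  if (rank == 0) {
    for (int i = 0; i < size; ++i) printf("from rank %d: %d\n", i, all[i]);
    free(all);
  }

  MPI_Finalize();
  return 0;
}
\end{lstlisting}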
@@ -334,7 +273,6 @@ \section{Collective operations}
{
\footnotesize
\texttt{int MPI\_Scatter(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, int root, MPI\_Comm comm);} \\
\texttt{void scatter(const communicator\& comm, const std::vector<T>\& in\_values, T\& out\_value, int root);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
}

\begin{minipage}[t]{0.6\textwidth}
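An illustrative \texttt{MPI\_Scatter} sketch (not part of this diff): the root distributes one integer to each rank from an array only it fills.

\lstset{style=CStyle, caption=Illustrative scatter sketch}
\begin{lstlisting}
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char* argv[]) {
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  int* data = NULL;
  if (rank == 0) {  // only the root fills the send buffer
    data = malloc(size * sizeof(int));
    for (int i = 0; i < size; ++i) data[i] = i * i;
  }

  int my_chunk = 0;
  // sendcount is the count sent to EACH process.
  MPI_Scatter(data, 1, MPI_INT, &my_chunk, 1, MPI_INT, 0, MPI_COMM_WORLD);

  printf("rank %d got %d\n", rank, my_chunk);
  if (rank == 0) free(data);

  MPI_Finalize();
  return 0;
}
\end{lstlisting}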
@@ -359,11 +297,25 @@ \section{Collective operations}
{
\footnotesize
\texttt{int MPI\_Allgather(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, MPI\_Comm comm);} \\
\texttt{void all\_gather(const communicator\& comm, const T\& in\_value,
std::vector<T>\& out\_values);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
}

Using this function avoids the need for a separate gather followed by a broadcast.
\begin{minipage}[t]{0.6\textwidth}
Parameters:
\begin{itemize}
\item sendbuf: Starting address of send buffer
\item sendcount / sendtype: Number and type of elements contributed by each process
\item recvbuf: Starting address of receive buffer
\item recvcount / recvtype: Number and type of elements received from each process
\end{itemize}
\end{minipage}
\hfill
\begin{minipage}[t]{0.35\textwidth}
\begin{figure}[h]
\includegraphics[scale=0.6]{images/allgather.png}
\end{figure}
\end{minipage}

{\footnotesize Source: \href{https://mpitutorial.com/tutorials/mpi-scatter-gather-and-allgather/}{https://mpitutorial.com/tutorials/mpi-scatter-gather-and-allgather/}}
\end{frame}
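Not from this commit: an \texttt{MPI\_Allgather} sketch in which every rank contributes one value and every rank ends up with the full array, replacing a gather followed by a broadcast.

\lstset{style=CStyle, caption=Illustrative allgather sketch}
\begin{lstlisting}
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char* argv[]) {
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  int local = rank + 1;                   // one element per process
  int* all = malloc(size * sizeof(int));  // every rank receives the full set

  MPI_Allgather(&local, 1, MPI_INT, all, 1, MPI_INT, MPI_COMM_WORLD);

  printf("rank %d sees:", rank);
  for (int i = 0; i < size; ++i) printf(" %d", all[i]);
  printf("\n");

  free(all);
  MPI_Finalize();
  return 0;
}
\end{lstlisting}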

\begin{frame}{All-to-All (\texttt{MPI\_Alltoall})}
@@ -372,7 +324,6 @@ \section{Collective operations}
{
\footnotesize
\texttt{int MPI\_Alltoall(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, MPI\_Comm comm);} \\
\texttt{void all\_to\_all(const communicator\& comm, const std::vector<T>\& in\_values, std::vector<T>\& out\_values);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
}

Note: This operation is communication-intensive.
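An illustrative \texttt{MPI\_Alltoall} sketch (not part of this diff): each rank sends one distinct integer to every other rank, so the send and receive buffers both hold one element per process.

\lstset{style=CStyle, caption=Illustrative all-to-all sketch}
\begin{lstlisting}
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char* argv[]) {
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  int* sendbuf = malloc(size * sizeof(int));
  int* recvbuf = malloc(size * sizeof(int));
  for (int i = 0; i < size; ++i)
    sendbuf[i] = rank * 100 + i;  // element i is destined for rank i

  // Element i of sendbuf goes to rank i; element j of recvbuf came from rank j.
  MPI_Alltoall(sendbuf, 1, MPI_INT, recvbuf, 1, MPI_INT, MPI_COMM_WORLD);

  printf("rank %d received from rank 0: %d\n", rank, recvbuf[0]);

  free(sendbuf);
  free(recvbuf);
  MPI_Finalize();
  return 0;
}
\end{lstlisting}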
@@ -403,8 +354,8 @@ \section{Collective operations}
\begin{frame}{References}
\begin{enumerate}
\item MPI Standard \href{https://www.mpi-forum.org/docs/}{https://www.mpi-forum.org/docs/}
\item Boost.MPI Chapter in Boost documentation \href{https://www.boost.org/doc/libs/1_86_0/doc/html/mpi.html}{https://www.boost.org/doc/libs/1\_86\_0/doc/html/mpi.html}
\item Open MPI v4.0.7 documentation: \href{https://www.open-mpi.org/doc/v4.0/}{https://www.open-mpi.org/doc/v4.0/}
\item MPI Scatter, Gather, and Allgather: \href{https://mpitutorial.com/tutorials/mpi-scatter-gather-and-allgather/}{https://mpitutorial.com/tutorials/mpi-scatter-gather-and-allgather/}
\end{enumerate}
\end{frame}

7 changes: 3 additions & 4 deletions 03-mpi-api/03-mpi-api.toc
@@ -1,4 +1,3 @@
\beamer@sectionintoc {1}{Boost.MPI}{3}{0}{1}
\beamer@sectionintoc {2}{Advanced Send/Receive API}{6}{0}{2}
\beamer@sectionintoc {3}{Synchronization}{10}{0}{3}
\beamer@sectionintoc {4}{Collective operations}{13}{0}{4}
\beamer@sectionintoc {1}{Advanced Send/Receive API}{3}{0}{1}
\beamer@sectionintoc {2}{Synchronization}{7}{0}{2}
\beamer@sectionintoc {3}{Collective operations}{10}{0}{3}
Binary file added 03-mpi-api/images/allgather.png