  \tableofcontents
 \end{frame}
 
+\section{Boost.MPI}
+
+\begin{frame}{Boost.MPI}
+ Boost.MPI is a part of the Boost C++ libraries that provides C++ bindings for the Message Passing Interface (MPI).
+
+ Boost.MPI makes it easier to write distributed applications in C++ by wrapping the complex MPI API with C++-friendly abstractions, improving safety and reducing the amount of boilerplate code.
+
+ Key features of Boost.MPI:
+ \begin{itemize}
+ \item Simplified use of MPI with C++ bindings.
+ \item Supports complex data types through Boost.Serialization.
+ \item Easier management of distributed tasks and communication.
+ \item Compatible with common MPI implementations such as MPICH, Open MPI, and MS-MPI.
+ \end{itemize}
+
+ Note: the mapping from the C MPI API to Boost.MPI is documented here: \href{https://www.boost.org/doc/libs/1_86_0/doc/html/mpi/c_mapping.html}{link}
+
+ {\footnotesize For more details see the Boost.MPI docs: \href{https://www.boost.org/doc/libs/1_86_0/doc/html/mpi.html}{link}}
+\end{frame}
+
+\begin{frame}[fragile]{Boost.MPI example}
+\lstset{style=CStyle, caption=Hello World example with Boost.MPI}
+\begin{lstlisting}
+#include <boost/mpi.hpp>
+#include <iostream>
+#include <string>
+
+// Namespace alias for convenience
+namespace mpi = boost::mpi;
+
+int main(int argc, char* argv[]) {
+  // Initialize the MPI environment
+  mpi::environment env(argc, argv);
+  mpi::communicator world;
+
+  // Get the rank (ID) of the current process and the total number of processes
+  int rank = world.rank();
+  int size = world.size();
+
+  if (rank == 0) {
+    // If this is the root process (rank 0), send a message to another process
+    std::string message = "Hello from process 0";
+    world.send(1, 0, message); // Send to process 1
+    std::cout << "Process 0 sent: " << message << std::endl;
+  } else if (rank == 1) {
+    // If this is process 1, receive the message
+    std::string received_message;
+    world.recv(0, 0, received_message); // Receive from process 0
+    std::cout << "Process 1 received: " << received_message << std::endl;
+  }
+
+  return 0;
+}
+\end{lstlisting}
+\end{frame}
+
 \section{Advanced Send/Receive API}
 
 \begin{frame}{Why Are \texttt{MPI\_Send} and \texttt{MPI\_Recv} Not Enough?}
@@ -79,7 +134,11 @@ \section{Advanced Send/Receive API}
 \begin{frame}{\texttt{MPI\_Isend}}
  Non-blocking send function. Initiates a send operation that returns immediately.
 
- \texttt{int MPI\_Isend(const void *buf, int count, MPI\_Datatype datatype, int dest, int tag, MPI\_Comm comm, MPI\_Request *request);}
+ {
+ \footnotesize
+ \texttt{int MPI\_Isend(const void *buf, int count, MPI\_Datatype datatype, int dest, int tag, MPI\_Comm comm, MPI\_Request *request);} \\
+ \texttt{boost::mpi::request boost::mpi::communicator::isend(int dest, int tag, const T* values, int n);}
+ }
 
  Parameters:
 
@@ -92,13 +151,17 @@ \section{Advanced Send/Receive API}
  \item comm: Communicator
  \item request: Communication request handle
  \end{itemize}
- Usage: Allows the sender to proceed with computation while the message is being sent.
+ {\footnotesize Usage: Allows the sender to proceed with computation while the message is being sent.}
 \end{frame}
 
 \begin{frame}{\texttt{MPI\_Irecv}}
  Non-blocking receive function. Initiates a receive operation that returns immediately.
 
- \texttt{int MPI\_Irecv(void *buf, int count, MPI\_Datatype datatype, int source, int tag, MPI\_Comm comm, MPI\_Request *request);}
+ {
+ \footnotesize
+ \texttt{int MPI\_Irecv(void *buf, int count, MPI\_Datatype datatype, int source, int tag, MPI\_Comm comm, MPI\_Request *request);} \\
+ \texttt{boost::mpi::request boost::mpi::communicator::irecv(int source, int tag, T\& value);}
+ }
 
  Parameters:
 
@@ -111,7 +174,7 @@ \section{Advanced Send/Receive API}
  \item comm: Communicator
  \item request: Communication request handle
  \end{itemize}
- Usage: Allows the receiver to proceed with computation while waiting for the message.
+ {\footnotesize Usage: Allows the receiver to proceed with computation while waiting for the message.}
 \end{frame}
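+
+% Added sketch (not from the original deck): a minimal non-blocking exchange that
+% pairs MPI_Isend/MPI_Irecv with MPI_Wait; assumes the program runs with at least two processes.
+\begin{frame}[fragile]{Non-blocking example}
+\lstset{style=CStyle, caption={Non-blocking exchange with MPI\_Isend and MPI\_Irecv}}
+\begin{lstlisting}
+#include <mpi.h>
+#include <stdio.h>
+
+int main(int argc, char* argv[]) {
+  MPI_Init(&argc, &argv);
+
+  int rank;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  int data = 0;
+  MPI_Request request;
+  if (rank == 0) {
+    data = 42;
+    // Initiate the send and return immediately
+    MPI_Isend(&data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &request);
+  } else if (rank == 1) {
+    // Initiate the receive and return immediately
+    MPI_Irecv(&data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &request);
+  }
+
+  // ...do useful work here while the transfer progresses...
+
+  if (rank <= 1) {
+    // Block until the pending operation completes
+    MPI_Wait(&request, MPI_STATUS_IGNORE);
+  }
+  if (rank == 1) {
+    printf("Process 1 received: %d\n", data);
+  }
+
+  MPI_Finalize();
+  return 0;
+}
+\end{lstlisting}
+\end{frame}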
 
 \section{Synchronization}
@@ -137,7 +200,11 @@ \section{Synchronization}
 \begin{frame}{\texttt{MPI\_Barrier}}
  Global synchronization function. It blocks processes until all of them have reached the barrier.
 
- \texttt{int MPI\_Barrier(MPI\_Comm comm);}
+ {
+ \footnotesize
+ \texttt{int MPI\_Barrier(MPI\_Comm comm);} \\
+ \texttt{void boost::mpi::communicator::barrier();}
+ }
 
  Usage:
 
@@ -175,7 +242,11 @@ \section{Collective operations}
 \begin{frame}{Broadcast (\texttt{MPI\_Bcast})}
  Send data from one process to all other processes.
 
- \texttt{int MPI\_Bcast(void *buffer, int count, MPI\_Datatype datatype, int root, MPI\_Comm comm);}
+ {
+ \footnotesize
+ \texttt{int MPI\_Bcast(void *buffer, int count, MPI\_Datatype datatype, int root, MPI\_Comm comm);} \\
+ \texttt{void broadcast(const communicator\& comm, T\& value, int root);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
+ }
 
  \begin{minipage}[t]{0.6\textwidth}
  Parameters:
@@ -201,7 +272,11 @@ \section{Collective operations}
 
  Can be seen as the opposite operation to broadcast.
 
- \texttt{int MPI\_Reduce(const void *sendbuf, void *recvbuf, int count, MPI\_Datatype datatype, MPI\_Op op, int root, MPI\_Comm comm);}
+ {
+ \footnotesize
+ \texttt{int MPI\_Reduce(const void *sendbuf, void *recvbuf, int count, MPI\_Datatype datatype, MPI\_Op op, int root, MPI\_Comm comm);} \\
+ \texttt{void reduce(const communicator\& comm, const T\& in\_value, T\& out\_value, Op op, int root);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
+ }
 
  \begin{minipage}[t]{0.2\textwidth}
  Supported operations:
@@ -224,7 +299,11 @@ \section{Collective operations}
 \begin{frame}{\texttt{MPI\_Gather}}
  Collect data from all processes to a single root process.
 
- \texttt{int MPI\_Gather(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, int root, MPI\_Comm comm);}
+ {
+ \footnotesize
+ \texttt{int MPI\_Gather(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, int root, MPI\_Comm comm);} \\
+ \texttt{void gather(const communicator\& comm, const T\& in\_value, std::vector<T>\& out\_values, int root);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
+ }
 
  \begin{minipage}[t]{0.6\textwidth}
  Parameters:
@@ -245,7 +324,11 @@ \section{Collective operations}
 \begin{frame}{\texttt{MPI\_Scatter}}
  Distribute distinct chunks of data from the root to all processes.
 
- \texttt{int MPI\_Scatter(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, int root, MPI\_Comm comm);}
+ {
+ \footnotesize
+ \texttt{int MPI\_Scatter(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, int root, MPI\_Comm comm);} \\
+ \texttt{void scatter(const communicator\& comm, const std::vector<T>\& in\_values, T\& out\_value, int root);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
+ }
 
  \begin{minipage}[t]{0.6\textwidth}
  Parameters:
@@ -266,15 +349,24 @@ \section{Collective operations}
 \begin{frame}{\texttt{MPI\_Allgather}}
  Gather data from all processes and distribute the combined data to all processes.
 
- \texttt{int MPI\_Allgather(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, MPI\_Comm comm);}
+ {
+ \footnotesize
+ \texttt{int MPI\_Allgather(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, MPI\_Comm comm);} \\
+ \texttt{void all\_gather(const communicator\& comm, const T\& in\_value, std::vector<T>\& out\_values);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
+ }
 
  Using this function removes the need for separate gather and broadcast operations.
 \end{frame}
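+
+% Added sketch (not from the original deck): minimal Boost.MPI collective calls
+% (broadcast, reduce, gather) plus a barrier; the values are illustrative only.
+\begin{frame}[fragile]{Boost.MPI collectives example}
+\lstset{style=CStyle, caption={broadcast, reduce, and gather with Boost.MPI}}
+\begin{lstlisting}
+#include <boost/mpi.hpp>
+#include <boost/mpi/collectives.hpp>
+#include <functional>
+#include <iostream>
+#include <vector>
+
+namespace mpi = boost::mpi;
+
+int main(int argc, char* argv[]) {
+  mpi::environment env(argc, argv);
+  mpi::communicator world;
+
+  // Root chooses a value; broadcast delivers it to every process
+  int value = (world.rank() == 0) ? 42 : 0;
+  mpi::broadcast(world, value, 0);
+
+  // Sum all ranks on the root process
+  int sum = 0;
+  mpi::reduce(world, world.rank(), sum, std::plus<int>(), 0);
+
+  // Collect every rank's id into a vector on the root
+  std::vector<int> ranks;
+  mpi::gather(world, world.rank(), ranks, 0);
+
+  // Wait until all processes reach this point
+  world.barrier();
+
+  if (world.rank() == 0) {
+    std::cout << "value=" << value << " sum=" << sum
+              << " gathered=" << ranks.size() << std::endl;
+  }
+  return 0;
+}
+\end{lstlisting}
+\end{frame}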
 
 \begin{frame}{All-to-All (\texttt{MPI\_Alltoall})}
  Each process sends data to and receives data from all other processes. It can be seen as transposing a matrix distributed across processes.
 
- \texttt{int MPI\_Alltoall(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, MPI\_Comm comm);}
+ {
+ \footnotesize
+ \texttt{int MPI\_Alltoall(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, MPI\_Comm comm);} \\
+ \texttt{void all\_to\_all(const communicator\& comm, const std::vector<T>\& in\_values, std::vector<T>\& out\_values);} (needs \texttt{\#include <boost/mpi/collectives.hpp>})
+ }
 
  Note: this operation is communication-intensive.
 \end{frame}
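+
+% Added sketch (not from the original deck): each process exchanges one distinct
+% element with every other process via boost::mpi::all_to_all.
+\begin{frame}[fragile]{Boost.MPI all-to-all example}
+\lstset{style=CStyle, caption={all\_to\_all with Boost.MPI}}
+\begin{lstlisting}
+#include <boost/mpi.hpp>
+#include <boost/mpi/collectives.hpp>
+#include <iostream>
+#include <vector>
+
+namespace mpi = boost::mpi;
+
+int main(int argc, char* argv[]) {
+  mpi::environment env(argc, argv);
+  mpi::communicator world;
+
+  // in_values[i] is the element this process sends to process i
+  std::vector<int> in_values(world.size());
+  for (int i = 0; i < world.size(); ++i) {
+    in_values[i] = world.rank() * 100 + i;
+  }
+
+  // out_values[i] ends up holding the element received from process i
+  std::vector<int> out_values;
+  mpi::all_to_all(world, in_values, out_values);
+
+  std::cout << "Process " << world.rank() << " received "
+            << out_values.size() << " elements" << std::endl;
+  return 0;
+}
+\end{lstlisting}
+\end{frame}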
@@ -304,6 +396,7 @@ \section{Collective operations}
 \begin{frame}{References}
  \begin{enumerate}
  \item MPI Standard: \href{https://www.mpi-forum.org/docs/}{https://www.mpi-forum.org/docs/}
+ \item Boost.MPI chapter in the Boost documentation: \href{https://www.boost.org/doc/libs/1_86_0/doc/html/mpi.html}{https://www.boost.org/doc/libs/1_86_0/doc/html/mpi.html}
  \item Open MPI v4.0.7 documentation: \href{https://www.open-mpi.org/doc/v4.0/}{https://www.open-mpi.org/doc/v4.0/}
  \end{enumerate}
 \end{frame}