Skip to content

Use C++ MPI interfaces instead of C interfaces. #11

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 10 additions & 13 deletions c++/src/mpicommons.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -28,12 +28,8 @@ void MPICommons::init()

// Switch for using MPI.
#if RUNMPI == true
// Dummy args.
int argc = 0;
char** argv;

// Make the init call.
MPI_Init( &argc, &argv );
MPI::Init();

// Set the flag to prevent further calls.
inited__ = true;
Expand All @@ -51,19 +47,19 @@ void MPICommons::finalize()
}

#if RUNMPI == true
MPI_Finalize();
finalized__ = true;
MPI::Finalize();
finalized__ = true;
#endif
}


// -----------------------------------------------------------------------------
//
int MPICommons::myRank(const MPI_Comm comm)
int MPICommons::myRank(const MPI::Intracomm & comm)
{
#if RUNMPI == true
int rank;
MPI_Comm_rank( comm, &rank );
rank = comm.Get_rank();
return rank;
#else
return 0;
Expand All @@ -73,11 +69,11 @@ int MPICommons::myRank(const MPI_Comm comm)

// -----------------------------------------------------------------------------
//
int MPICommons::size(const MPI_Comm comm)
int MPICommons::size(const MPI::Intracomm & comm)
{
#if RUNMPI == true
int size;
MPI_Comm_size( comm, &size );
size = comm.Get_size();
return size;
#else
return 1;
Expand All @@ -87,9 +83,10 @@ int MPICommons::size(const MPI_Comm comm)

// -----------------------------------------------------------------------------
//
void MPICommons::barrier(const MPI_Comm comm)
void MPICommons::barrier(const MPI::Intracomm & comm)
{
#if RUNMPI == true
MPI_Barrier( comm );
comm.Barrier();
#endif
}

9 changes: 5 additions & 4 deletions c++/src/mpicommons.h
Original file line number Diff line number Diff line change
Expand Up @@ -32,22 +32,23 @@ struct MPICommons {
* \param comm: The communicator to use.
* \return: The rank of this process within the given communicator.
*/
static int myRank(const MPI_Comm comm=MPI_COMM_WORLD);
static int myRank(const MPI::Intracomm & comm=MPI::COMM_WORLD);

/*! \brief Wraps MPI_COMM_SIZE
* \param comm: The communicator to use.
* \return: The size of the communicator (the total number of processes).
*/
static int size(const MPI_Comm comm=MPI_COMM_WORLD);
static int size(const MPI::Intracomm & comm=MPI::COMM_WORLD);

/*! \brief Wraps MPI_BARRIER, synchronizing processes.
* \param comm: The communicator to use.
*/
static void barrier(const MPI_Comm comm=MPI_COMM_WORLD);
static void barrier(const MPI::Intracomm & comm=MPI::COMM_WORLD);

/*! \brief Returns true if the calling process is the master.
*/
static bool isMaster(const MPI_Comm comm=MPI_COMM_WORLD) { return (myRank(comm) == 0); }
static bool isMaster(const MPI::Intracomm & comm=MPI::COMM_WORLD)
{ return (myRank(comm) == 0); }



Expand Down
8 changes: 6 additions & 2 deletions c++/src/mpih.h
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,12 @@
#if RUNMPI == true
#include <mpi.h>
#else
typedef int MPI_Comm;
#define MPI_COMM_WORLD 91
// Redefine the MPI namespace.
namespace MPI
{
typedef int Intracomm;
static int COMM_WORLD;
}
#endif

#endif // __MPIH__
50 changes: 23 additions & 27 deletions c++/src/mpiroutines.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
// -------------------------------------------------------------------------- //
//
void distributeToAll(int & data,
const MPI_Comm & comm)
const MPI::Intracomm & comm)
{
#if RUNMPI == true

Expand All @@ -28,11 +28,10 @@ void distributeToAll(int & data,
const int root = 0;

// Send and receive.
MPI_Bcast(&data, // The send and receive buffer.
size, // The number of elements to communicate.
MPI_INT, // The type of data.
root, // The sender (master).
comm); // The communicator we use.
comm.Bcast(&data, // The send and receive buffer.
size, // The number of elements to communicate.
MPI_INT, // The type of data.
root); // The sender (master).

// Done.
#endif
Expand All @@ -42,41 +41,39 @@ void distributeToAll(int & data,
// -------------------------------------------------------------------------- //
//
void sumOverProcesses(int & data,
const MPI_Comm & comm)
const MPI::Intracomm & comm)
{
#if RUNMPI == true
const int size = 1;

// Copy the data over to the send buffer.
int send = data;

MPI_Allreduce(&send, // Send buffer.
&data, // Receive buffer (overwrite)
size, // Size of the buffers.
MPI_INT, // Data type.
MPI_SUM, // Operation to perform.
comm); // The communicator.
comm.Allreduce(&send, // Send buffer.
&data, // Receive buffer (overwrite)
size, // Size of the buffers.
MPI_INT, // Data type.
MPI_SUM); // Operation to perform.
// Done.
#endif
}

// -------------------------------------------------------------------------- //
//
void sumOverProcesses(std::vector<int> & data,
const MPI_Comm & comm)
const MPI::Intracomm & comm)
{
#if RUNMPI == true
const int size = data.size();

// Copy the data over to the send buffer.
std::vector<int> send(data);

MPI_Allreduce(&send[0], // Send buffer.
&data[0], // Receive buffer (overwrite)
size, // Size of the buffers.
MPI_INT, // Data type.
MPI_SUM, // Operation to perform.
comm); // The communicator.
comm.Allreduce(&send[0], // Send buffer.
&data[0], // Receive buffer (overwrite)
size, // Size of the buffers.
MPI_INT, // Data type.
MPI_SUM); // Operation to perform.
// Done.
#endif
}
Expand All @@ -85,20 +82,19 @@ void sumOverProcesses(std::vector<int> & data,
// -------------------------------------------------------------------------- //
//
void sumOverProcesses(std::vector<double> & data,
const MPI_Comm & comm)
const MPI::Intracomm & comm)
{
#if RUNMPI == true
const int size = data.size();

// Copy the data over to the send buffer.
std::vector<double> send(data);

MPI_Allreduce(&send[0], // Send buffer.
&data[0], // Receive buffer (overwrite)
size, // Size of the buffers.
MPI_DOUBLE, // Data type.
MPI_SUM, // Operation to perform.
comm); // The communicator.
comm.Allreduce(&send[0], // Send buffer.
&data[0], // Receive buffer (overwrite)
size, // Size of the buffers.
MPI_DOUBLE, // Data type.
MPI_SUM); // Operation to perform.
// Done.
#endif
}
Expand Down
21 changes: 11 additions & 10 deletions c++/src/mpiroutines.h
Original file line number Diff line number Diff line change
Expand Up @@ -33,31 +33,31 @@ std::vector< std::pair<int,int> > determineChunks(const int mpi_size,
* \param comm : The communicator to use.
*/
void distributeToAll(int & data,
const MPI_Comm & comm=MPI_COMM_WORLD);
const MPI::Intracomm & comm=MPI::COMM_WORLD);


/*! \brief Sum the data over all processors.
* \param data : The data to sum.
* \param comm : The communicator to use.
*/
void sumOverProcesses(int & data,
const MPI_Comm & comm=MPI_COMM_WORLD);
const MPI::Intracomm & comm=MPI::COMM_WORLD);


/*! \brief Sum the data over all processors.
* \param data : The data to sum.
* \param comm : The communicator to use.
*/
void sumOverProcesses(std::vector<int> & data,
const MPI_Comm & comm=MPI_COMM_WORLD);
const MPI::Intracomm & comm=MPI::COMM_WORLD);


/*! \brief Sum the data over all processors.
* \param data : The data to sum.
* \param comm : The communicator to use.
*/
void sumOverProcesses(std::vector<double> & data,
const MPI_Comm & comm=MPI_COMM_WORLD);
const MPI::Intracomm & comm=MPI::COMM_WORLD);


/*! \brief Split the global vector over the processes.
Expand All @@ -67,7 +67,7 @@ void sumOverProcesses(std::vector<double> & data,
*/
template <class T_vector>
T_vector splitOverProcesses(const T_vector & global,
const MPI_Comm & comm=MPI_COMM_WORLD);
const MPI::Intracomm & comm=MPI::COMM_WORLD);

/*! \brief Join the local vectors to form a global.
* \param local : The data vector to join.
Expand All @@ -76,7 +76,7 @@ T_vector splitOverProcesses(const T_vector & global,
*/
template <class T_vector>
T_vector joinOverProcesses(const T_vector & local,
const MPI_Comm & comm=MPI_COMM_WORLD);
const MPI::Intracomm & comm=MPI::COMM_WORLD);



Expand All @@ -89,13 +89,13 @@ T_vector joinOverProcesses(const T_vector & local,
//
template <class T_vector>
T_vector splitOverProcesses(const T_vector & global,
const MPI_Comm & comm)
const MPI::Intracomm & comm)
{
// Get the dimensions.
#if RUNMPI == true
int rank, size;
MPI_Comm_rank( comm, &rank );
MPI_Comm_size( comm, &size );
rank = comm.Get_rank();
size = comm.Get_size();
#else
int rank = 0;
int size = 1;
Expand Down Expand Up @@ -128,7 +128,7 @@ T_vector splitOverProcesses(const T_vector & global,
//
template <class T_vector>
T_vector joinOverProcesses(const T_vector & local,
const MPI_Comm & comm)
const MPI::Intracomm & comm)
{
// PERFORMME: Prototyping. Chunks does not need to be this involved.

Expand Down Expand Up @@ -175,3 +175,4 @@ T_vector joinOverProcesses(const T_vector & local,


#endif // __MPIROUTINES__

44 changes: 20 additions & 24 deletions c++/unittest/test_mpicommons.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ void Test_MPICommons::testSize()
// Get the reference size.
#if RUNMPI == true
int ref_size;
MPI_Comm_size( MPI_COMM_WORLD, &ref_size );
ref_size = MPI::COMM_WORLD.Get_size();
#else
const int ref_size = 1;
#endif
Expand All @@ -42,7 +42,7 @@ void Test_MPICommons::testRank()
// Get the reference rank.
#if RUNMPI == true
int ref_rank;
MPI_Comm_rank( MPI_COMM_WORLD, &ref_rank );
ref_rank = MPI::COMM_WORLD.Get_rank();
#else
const int ref_rank = 0;
#endif
Expand All @@ -62,7 +62,7 @@ void Test_MPICommons::testIsMaster()
// Get the reference rank.
#if RUNMPI == true
int ref_rank;
MPI_Comm_rank( MPI_COMM_WORLD, &ref_rank );
ref_rank = MPI::COMM_WORLD.Get_rank();
#else
const int ref_rank = 0;
#endif
Expand All @@ -88,9 +88,8 @@ void Test_MPICommons::testBarrier()
// Only if run in parallel.
#if RUNMPI == true
int rank, size;
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
MPI_Comm_size( MPI_COMM_WORLD, &size );

rank = MPI::COMM_WORLD.Get_rank();
size = MPI::COMM_WORLD.Get_size();
time_t seconds;

std::vector<int> time_before_sleep(size);
Expand Down Expand Up @@ -120,28 +119,25 @@ void Test_MPICommons::testBarrier()

// Communicate the timing results.
std::vector<int> send1(time_before_sleep);
MPI_Allreduce(&send1[0],
&time_before_sleep[0],
size,
MPI_INT,
MPI_SUM,
MPI_COMM_WORLD);
MPI::COMM_WORLD.Allreduce(&send1[0],
&time_before_sleep[0],
size,
MPI_INT,
MPI_SUM);

std::vector<int> send2(time_after_sleep);
MPI_Allreduce(&send2[0],
&time_after_sleep[0],
size,
MPI_INT,
MPI_SUM,
MPI_COMM_WORLD);
MPI::COMM_WORLD.Allreduce(&send2[0],
&time_after_sleep[0],
size,
MPI_INT,
MPI_SUM);

std::vector<int> send3(time_after_barrier);
MPI_Allreduce(&send3[0],
&time_after_barrier[0],
size,
MPI_INT,
MPI_SUM,
MPI_COMM_WORLD);
MPI::COMM_WORLD.Allreduce(&send3[0],
&time_after_barrier[0],
size,
MPI_INT,
MPI_SUM);

// Check that the results.

Expand Down
Loading