Skip to content

Commit

Permalink
Fixed #1002: If MPI parcelport is not available, running HPX under mpirun should fail
Browse files Browse the repository at this point in the history
  • Loading branch information
hkaiser committed Nov 9, 2013
1 parent 1d3ec86 commit f40c5c9
Show file tree
Hide file tree
Showing 4 changed files with 68 additions and 19 deletions.
19 changes: 17 additions & 2 deletions hpx/util/mpi_environment.hpp
Expand Up @@ -12,12 +12,14 @@
#include <hpx/hpx_fwd.hpp>
#include <cstdlib>

namespace hpx { namespace util {
namespace hpx { namespace util
{
struct command_line_handling;

struct HPX_EXPORT mpi_environment
{
static int init(int *argc, char ***argv, command_line_handling& cfg);
static std::size_t init(int *argc, char ***argv, command_line_handling& cfg,
std::size_t node);
static void finalize();

static bool enabled();
Expand All @@ -34,5 +36,18 @@ namespace hpx { namespace util {
};
}}

#else

// Fallback interface for builds WITHOUT the MPI parcelport
// (HPX_HAVE_PARCELPORT_MPI not defined). Declaring init() here lets
// callers (e.g. command_line_handling) invoke it unconditionally,
// without #ifdef guards at every call site.
namespace hpx { namespace util
{
struct command_line_handling;

struct HPX_EXPORT mpi_environment
{
// Validates the configuration and returns the locality id 'node'
// unchanged. The non-MPI implementation throws std::runtime_error if
// MPI support is explicitly requested via 'hpx.parcel.mpi.enable' or
// if the process appears to have been launched through mpirun (see
// the corresponding #else branch in src/util/mpi_environment.cpp).
// argc/argv are accepted for signature compatibility with the MPI
// build and are not modified here.
static std::size_t init(int *argc, char ***argv, command_line_handling& cfg,
std::size_t node);
};
}}

#endif
#endif
4 changes: 0 additions & 4 deletions src/hpx_init.cpp
Expand Up @@ -44,10 +44,6 @@
#include <boost/assign/std/vector.hpp>
#include <boost/foreach.hpp>

#if defined(HPX_HAVE_PARCELPORT_MPI)
#include <hpx/util/mpi_environment.hpp>
#endif

///////////////////////////////////////////////////////////////////////////////
namespace hpx
{
Expand Down
7 changes: 1 addition & 6 deletions src/util/command_line_handling.cpp
Expand Up @@ -15,10 +15,7 @@
#include <hpx/runtime/threads/topology.hpp>
#include <hpx/runtime/threads/policies/affinity_data.hpp>
#include <hpx/runtime/threads/policies/topology.hpp>

#if defined(HPX_HAVE_PARCELPORT_MPI)
#include <hpx/util/mpi_environment.hpp>
#endif

#include <boost/asio.hpp>
#include <boost/lexical_cast.hpp>
Expand Down Expand Up @@ -783,10 +780,8 @@ namespace hpx { namespace util
// Re-run program option analysis, ini settings (such as aliases)
// will be considered now.

#if defined(HPX_HAVE_PARCELPORT_MPI)
node = static_cast<std::size_t>(
util::mpi_environment::init(&argc, &argv, *this));
#endif
util::mpi_environment::init(&argc, &argv, *this, node));

// minimally assume one locality and this is the console
if (node == std::size_t(-1))
Expand Down
57 changes: 50 additions & 7 deletions src/util/mpi_environment.cpp
Expand Up @@ -7,6 +7,8 @@

#if defined(HPX_HAVE_PARCELPORT_MPI)
#include <mpi.h>
#endif

#include <hpx/config.hpp>

#include <hpx/hpx_fwd.hpp>
Expand Down Expand Up @@ -60,19 +62,24 @@ namespace hpx { namespace util
return dflt;
}
}
}}

#if defined(HPX_HAVE_PARCELPORT_MPI)
namespace hpx { namespace util
{
bool mpi_environment::enabled_ = false;
int mpi_environment::provided_threading_flag_ = MPI_THREAD_SINGLE;

int mpi_environment::init(int *argc, char ***argv, command_line_handling& cfg)
std::size_t mpi_environment::init(int *argc, char ***argv, command_line_handling& cfg,
std::size_t /*node*/)
{
using namespace boost::assign;

int this_rank = -1;

// We assume to use the MPI parcelport if it is not explicitly disabled
enabled_ = detail::get_cfg_entry(cfg, "hpx.parcel.mpi.enable", 1) != 0;
if (!enabled_) return this_rank;
if (!enabled_) return std::size_t(this_rank);

// We disable the MPI parcelport if the application is not run using mpirun
// and the tcp/ip parcelport is not explicitly disabled
Expand All @@ -86,7 +93,7 @@ namespace hpx { namespace util
cfg.rtcfg_.add_entry("hpx.parcel.mpi.enable", "0");

enabled_ = false;
return this_rank;
return std::size_t(this_rank);
}

cfg.ini_config_ += "hpx.parcel.bootstrap!=mpi";
Expand All @@ -108,7 +115,7 @@ namespace hpx { namespace util
MPI_Error_string(retval, message, &msglen);
message[msglen] = '\0';

std::string msg("MPI_Init_thread failed: ");
std::string msg("mpi_environment::init: MPI_Init_thread failed: ");
msg = msg + message + ".";
throw std::runtime_error(msg.c_str());
}
Expand All @@ -118,8 +125,8 @@ namespace hpx { namespace util
cfg.rtcfg_.add_entry("hpx.parcel.mpi.enable", "0");

enabled_ = false;
throw std::runtime_error("MPI_Init_thread: provided multi_threading "
"mode is different from requested mode");
throw std::runtime_error("mpi_environment::init: MPI_Init_thread: "
"provided multi_threading mode is different from requested mode");
}

this_rank = rank();
Expand All @@ -139,7 +146,7 @@ namespace hpx { namespace util
cfg.ini_config_ += std::string("hpx.parcel.mpi.processorname!=") +
get_processor_name();

return this_rank;
return std::size_t(this_rank);
}

std::string mpi_environment::get_processor_name()
Expand Down Expand Up @@ -186,4 +193,40 @@ namespace hpx { namespace util
}
}}

#else

#include <hpx/hpx_fwd.hpp>
#include <hpx/util/runtime_configuration.hpp>
#include <hpx/util/command_line_handling.hpp>
#include <hpx/util/mpi_environment.hpp>

namespace hpx { namespace util
{
    // Stub used when HPX was built without the MPI parcelport: it never
    // initializes MPI and simply validates that the user is not expecting
    // MPI support from this binary.
    //
    // Returns the locality id 'node' unchanged; argc/argv are untouched.
    // Throws std::runtime_error when the configuration (or the launch
    // environment) demands the MPI parcelport this build cannot provide.
    std::size_t mpi_environment::init(int *argc, char ***argv,
        command_line_handling& cfg, std::size_t node)
    {
        // An explicit request for the MPI parcelport cannot be honored
        // here -- treat it as a hard configuration error.
        bool const mpi_requested =
            detail::get_cfg_entry(cfg, "hpx.parcel.mpi.enable", 1) != 0;
        if (mpi_requested)
        {
            throw std::runtime_error("mpi_environment::init: "
                "HPX is not compiled for MPI, but 'hpx.parcel.mpi.enable=1'. "
                "Please set HPX_HAVE_PARCELPORT_MPI=ON while configuring using cmake.");
        }

        // Being launched through mpirun likewise implies the user expects
        // the MPI parcelport -- fail loudly instead of silently running
        // a single-locality application (see issue #1002).
        if (detail::detect_mpi_environment(cfg.rtcfg_))
        {
            throw std::runtime_error("mpi_environment::init: "
                "HPX is not compiled for MPI, but the application was run using mpirun. "
                "Please set HPX_HAVE_PARCELPORT_MPI=ON while configuring using cmake.");
        }

        // Nothing to initialize; hand the locality id back unchanged.
        return node;
    }
}}

#endif

0 comments on commit f40c5c9

Please sign in to comment.