diff --git a/src/tests/blueprint/CMakeLists.txt b/src/tests/blueprint/CMakeLists.txt index 468d7fa98..c4d922c40 100644 --- a/src/tests/blueprint/CMakeLists.txt +++ b/src/tests/blueprint/CMakeLists.txt @@ -1,45 +1,45 @@ ############################################################################### # Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. -# +# # Produced at the Lawrence Livermore National Laboratory -# +# # LLNL-CODE-666778 -# +# # All rights reserved. -# -# This file is part of Conduit. -# +# +# This file is part of Conduit. +# # For details, see: http://software.llnl.gov/conduit/. -# +# # Please also read conduit/LICENSE -# -# Redistribution and use in source and binary forms, with or without +# +# Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, +# +# * Redistributions of source code must retain the above copyright notice, # this list of conditions and the disclaimer below. -# +# # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the disclaimer (as noted below) in the # documentation and/or other materials provided with the distribution. -# +# # * Neither the name of the LLNS/LLNL nor the names of its contributors may # be used to endorse or promote products derived from this software without # specific prior written permission. -# +# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, # LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY -# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
-# +# ############################################################################### ################################ @@ -49,7 +49,7 @@ ################################ # blueprint lib Unit Tests ################################ -set(BLUEPRINT_TESTS t_blueprint_smoke +set(BLUEPRINT_TESTS t_blueprint_smoke t_blueprint_mesh_verify t_blueprint_mesh_transform t_blueprint_mesh_generate @@ -90,6 +90,21 @@ else() message(STATUS "Fortran disabled: Skipping conduit blueprint fortran interface tests") endif() +set(BLUEPRINT_MPI_TESTS t_blueprint_mpi_relay) + +if(MPI_FOUND) + message(STATUS "MPI enabled: Adding conduit_blueprint_mpi unit tests") + foreach(TEST ${BLUEPRINT_MPI_TESTS}) + add_cpp_mpi_test(TEST ${TEST} NUM_MPI_TASKS 2 DEPENDS_ON mpi + conduit + conduit_blueprint + conduit_relay + conduit_relay_mpi) + endforeach() +else() + message(STATUS "MPI disabled: Skipping conduit_blueprint_mpi unit tests") +endif() + message(STATUS "Adding blueprint lib Zfp unit tests") add_cpp_test(TEST t_blueprint_zfp DEPENDS_ON conduit conduit_blueprint diff --git a/src/tests/blueprint/t_blueprint_mpi_relay.cpp b/src/tests/blueprint/t_blueprint_mpi_relay.cpp new file mode 100644 index 000000000..04b3bc7be --- /dev/null +++ b/src/tests/blueprint/t_blueprint_mpi_relay.cpp @@ -0,0 +1,268 @@ +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~// +// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. +// +// Produced at the Lawrence Livermore National Laboratory +// +// LLNL-CODE-666778 +// +// All rights reserved. +// +// This file is part of Conduit. +// +// For details, see: http://software.llnl.gov/conduit/. +// +// Please also read conduit/LICENSE +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the disclaimer below. +// +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the disclaimer (as noted below) in the +// documentation and/or other materials provided with the distribution. +// +// * Neither the name of the LLNS/LLNL nor the names of its contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, +// LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY +// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE.
+// +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~// + +//----------------------------------------------------------------------------- +/// +/// file: t_blueprint_mpi_relay.cpp +/// +//----------------------------------------------------------------------------- + +#include "conduit.hpp" +#include "conduit_blueprint.hpp" +#include "conduit_relay.hpp" +#include "conduit_relay_mpi.hpp" +#include "conduit_utils.hpp" + +#include <mpi.h> +#include <iostream> +#include <sstream> +#include <string> +#include "gtest/gtest.h" + +using namespace conduit; +using namespace conduit::relay; +using namespace conduit::relay::mpi; +using namespace conduit::utils; + +using namespace std; + +//----------------------------------------------------------------------------- +void mesh_blueprint_save(const Node &data, + const std::string &path, + const std::string &file_protocol) +{ + Node io_protos; + relay::io::about(io_protos["io"]); + bool hdf5_enabled = io_protos["io/protocols/hdf5"].as_string() == "enabled"; + + // only run this test if hdf5 is enabled + if(!hdf5_enabled) + { + CONDUIT_INFO("hdf5 is disabled, skipping hdf5 dependent test"); + return; + } + + // For simplicity, this code assumes that all ranks have + // data and that data is NOT multi-domain. + + int par_rank = mpi::rank(MPI_COMM_WORLD); + int par_size = mpi::size(MPI_COMM_WORLD); + + // setup the directory + char fmt_buff[64]; + int cycle = data["state/cycle"].to_int32(); + snprintf(fmt_buff, sizeof(fmt_buff), "%06d",cycle); + + std::string output_base_path = path; + + ostringstream oss; + oss << output_base_path << ".cycle_" << fmt_buff; + string output_dir = oss.str(); + + bool dir_ok = false; + + // let rank zero handle dir creation + if(par_rank == 0) + { + // check if the dir exists + dir_ok = is_directory(output_dir); + + if(!dir_ok) + { + // if not, try to let rank zero create it + dir_ok = create_directory(output_dir); + } + } + + int local_domains, global_domains; + local_domains = 1; + + // use an mpi sum to share rank zero's dir check result with all ranks + Node n_src, n_reduce; + + if(dir_ok) + n_src = (int)1; + else + n_src = (int)0; + + mpi::sum_all_reduce(n_src, + n_reduce, + MPI_COMM_WORLD); + + dir_ok = (n_reduce.as_int() == 1); + + // find out how many domains there are + n_src = local_domains; + + mpi::sum_all_reduce(n_src, + n_reduce, + MPI_COMM_WORLD); + + global_domains = n_reduce.as_int(); + + if(!dir_ok) + { + CONDUIT_ERROR("Error: failed to create directory " << output_dir); + } + + // write out our local domain + uint64 domain = data["state/domain_id"].to_uint64(); + + snprintf(fmt_buff, sizeof(fmt_buff), "%06llu",domain); + oss.str(""); + oss << "domain_" << fmt_buff << "." << file_protocol; + string output_file = join_file_path(output_dir,oss.str()); + relay::io::save(data, output_file); + + int root_file_writer = 0; + + // let rank zero write out the root file + if(par_rank == root_file_writer) + { + snprintf(fmt_buff, sizeof(fmt_buff), "%06d",cycle); + + oss.str(""); + oss << path + << ".cycle_" + << fmt_buff + << ".root"; + + string root_file = oss.str(); + + string output_dir_base, output_dir_path; + + rsplit_string(output_dir, + "/", + output_dir_base, + output_dir_path); + + string output_file_pattern = join_file_path(output_dir_base, "domain_%06d."
+ file_protocol); + + + Node root; + Node &bp_idx = root["blueprint_index"]; + + blueprint::mesh::generate_index(data, + "", + global_domains, + bp_idx["mesh"]); + + // work around conduit and manually add the state fields to the index; + // when they were missing, readers of the root file failed + if(data.has_path("state/cycle")) + { + bp_idx["mesh/state/cycle"] = data["state/cycle"].to_int32(); + } + + if(data.has_path("state/time")) + { + bp_idx["mesh/state/time"] = data["state/time"].to_double(); + } + + root["protocol/name"] = file_protocol; + root["protocol/version"] = "0.4.0"; + + root["number_of_files"] = global_domains; + // for now we will save one file per domain, so trees == files + root["number_of_trees"] = global_domains; + // TODO: make sure this is relative + root["file_pattern"] = output_file_pattern; + root["tree_pattern"] = "/"; + + relay::io::save(root,root_file,file_protocol); + } +} +//----------------------------------------------------------------------------- +TEST(blueprint_mpi_relay, basic_use) +{ + Node io_protos; + relay::io::about(io_protos["io"]); + bool hdf5_enabled = io_protos["io/protocols/hdf5"].as_string() == "enabled"; + + // only run this test if hdf5 is enabled + if(!hdf5_enabled) + { + CONDUIT_INFO("hdf5 is disabled, skipping hdf5 dependent test"); + return; + } + + int rank = mpi::rank(MPI_COMM_WORLD); + int com_size = mpi::size(MPI_COMM_WORLD); + std::cout<<"Rank "<<rank<<" of "<<com_size<<std::endl; std::vector<double> doubles; - + doubles.push_back(rank+1); doubles.push_back(3.4124*rank); doubles.push_back(10.7 - rank); n1.set_external(doubles); - + mpi::Request request; MPI_Status status; - if (rank == 0) + if (rank == 0) { mpi::irecv(n1, 1, 0, MPI_COMM_WORLD, &request); mpi::wait_recv(&request, &status); - } else if (rank == 1) + } else if (rank == 1) { mpi::isend(n1, 0, 0, MPI_COMM_WORLD, &request); mpi::wait_send(&request, &status); @@ -537,35 +537,35 @@ TEST(conduit_mpi_test, isend_irecv_wait) EXPECT_EQ(n1.as_float64_ptr()[0], 2); EXPECT_EQ(n1.as_float64_ptr()[1], 3.4124); EXPECT_EQ(n1.as_float64_ptr()[2], 9.7); - + } //----------------------------------------------------------------------------- -TEST(conduit_mpi_test, waitall) +TEST(conduit_mpi_test, waitall) { Node n1; int rank = 0; MPI_Comm_rank(MPI_COMM_WORLD, &rank); std::vector<double> doubles; - + doubles.push_back(rank+1); doubles.push_back(3.4124*rank); doubles.push_back(10.7 - rank); n1.set_external(doubles); - + mpi::Request requests[1]; MPI_Status statuses[1]; - if (rank == 0) + if (rank == 0) { mpi::irecv(n1, 1, 0, MPI_COMM_WORLD, &requests[0]); mpi::wait_all_recv(1, requests, statuses); - } else if (rank == 1) + } else if (rank == 1) { mpi::isend(n1, 0, 0, MPI_COMM_WORLD, &requests[0]); mpi::wait_all_send(1, requests, statuses); @@ -574,11 +574,11 @@ TEST(conduit_mpi_test, waitall) EXPECT_EQ(n1.as_float64_ptr()[0], 2); EXPECT_EQ(n1.as_float64_ptr()[1], 3.4124); EXPECT_EQ(n1.as_float64_ptr()[2], 9.7); - + } //----------------------------------------------------------------------------- -TEST(conduit_mpi_test, waitallmultirequest) +TEST(conduit_mpi_test, waitallmultirequest) { Node n1; Node n2; @@ -586,7 +586,7 @@ MPI_Comm_rank(MPI_COMM_WORLD, &rank); std::vector<double> doubles; - + doubles.push_back(rank+1); doubles.push_back(3.4124*rank); @@ -595,19 +595,19 @@ n1.set_external(doubles); n2 = 13123; - + mpi::Request requests[2]; MPI_Status statuses[2]; - if (rank == 0) + if (rank == 0) { mpi::irecv(n1, 1, 0, MPI_COMM_WORLD, &requests[0]); mpi::irecv(n2, 1, 0, MPI_COMM_WORLD,
&requests[1]); mpi::wait_all_recv(2, requests, statuses); } - else if (rank == 1) + else if (rank == 1) { mpi::isend(n1, 0, 0, MPI_COMM_WORLD, &requests[0]); mpi::isend(n2, 0, 0, MPI_COMM_WORLD, &requests[1]); @@ -619,11 +619,11 @@ TEST(conduit_mpi_test, waitallmultirequest) EXPECT_EQ(n1.as_float64_ptr()[2], 9.7); EXPECT_EQ(n2.as_int32(), 13123); - + } //----------------------------------------------------------------------------- -TEST(conduit_mpi_test, external) +TEST(conduit_mpi_test, external) { Node n1; int rank = 0; @@ -632,7 +632,7 @@ std::vector<double> doubles1; std::vector<double> doubles2; std::vector<double> doubles3; - + doubles1.push_back(rank+1); doubles1.push_back(3.4124*rank); @@ -655,11 +655,11 @@ mpi::Request request; MPI_Status status; - if (rank == 0) + if (rank == 0) { mpi::irecv(n1, 1, 0, MPI_COMM_WORLD, &request); mpi::wait_recv(&request, &status); - + } else if (rank == 1) { @@ -675,31 +675,31 @@ EXPECT_EQ(n1[1].as_float64_ptr()[0], 3); EXPECT_EQ(n1[1].as_float64_ptr()[1], 4.4124); EXPECT_EQ(n1[1].as_float64_ptr()[2], 10.7); - + EXPECT_EQ(n1[2].as_float64_ptr()[0], 4); EXPECT_EQ(n1[2].as_float64_ptr()[1], 5.4124); EXPECT_EQ(n1[2].as_float64_ptr()[2], 11.7); } //----------------------------------------------------------------------------- -TEST(conduit_mpi_test, allgather_simple) +TEST(conduit_mpi_test, allgather_simple) { Node n; - + int rank = mpi::rank(MPI_COMM_WORLD); n["values/a"] = rank+1; n["values/b"] = rank+2; n["values/c"] = rank+3; - + Node rcv; mpi::all_gather(n,rcv,MPI_COMM_WORLD); rcv.print(); - + Node res; res.set_external((int*)rcv.data_ptr(), 6); res.print(); - + int *res_ptr = res.value(); EXPECT_EQ(res_ptr[0],1); EXPECT_EQ(res_ptr[1],2); @@ -711,26 +711,26 @@ } //----------------------------------------------------------------------------- -TEST(conduit_mpi_test, gather_simple) +TEST(conduit_mpi_test, gather_simple) { Node n; - + int rank = mpi::rank(MPI_COMM_WORLD); n["values/a"] = rank+1; n["values/b"] = rank+2; n["values/c"] = rank+3; - + Node rcv; mpi::all_gather(n,rcv,MPI_COMM_WORLD); rcv.print(); - + if(rank == 0) { Node res; res.set_external((int*)rcv.data_ptr(), 6); res.print(); - + int *res_ptr = res.value(); EXPECT_EQ(res_ptr[0],1); EXPECT_EQ(res_ptr[1],2); @@ -743,10 +743,10 @@ //----------------------------------------------------------------------------- -TEST(conduit_mpi_test, gather_using_schema_simple) +TEST(conduit_mpi_test, gather_using_schema_simple) { Node n; - + int rank = mpi::rank(MPI_COMM_WORLD); n["values/a"] = rank+1; @@ -756,17 +756,17 @@ { n["values/d"] = rank+4; } - + Node rcv; mpi::gather_using_schema(n,rcv,0,MPI_COMM_WORLD); rcv.print(); - + if( rank == 0) { Node res; res.set_external((int*)rcv.data_ptr(), 7); res.print(); - + int *res_ptr = res.value(); EXPECT_EQ(res_ptr[0],1); EXPECT_EQ(res_ptr[1],2); @@ -779,10 +779,10 @@ } //----------------------------------------------------------------------------- -TEST(conduit_mpi_test, all_gather_using_schema_simple) +TEST(conduit_mpi_test, all_gather_using_schema_simple) { Node n; - + int rank = mpi::rank(MPI_COMM_WORLD); n["values/a"] = rank+1; @@ -792,15 +792,15 @@ { n["values/d"] = rank+4; } - + Node rcv; mpi::all_gather_using_schema(n,rcv,MPI_COMM_WORLD);
rcv.print(); - + Node res; res.set_external((int*)rcv.data_ptr(), 7); res.print(); - + int *res_ptr = res.value(); EXPECT_EQ(res_ptr[0],1); EXPECT_EQ(res_ptr[1],2); @@ -814,11 +814,11 @@ TEST(conduit_mpi_test, all_gather_using_schema_simple) //----------------------------------------------------------------------------- -TEST(conduit_mpi_test, bcast) +TEST(conduit_mpi_test, bcast) { int rank = mpi::rank(MPI_COMM_WORLD); int com_size = mpi::size(MPI_COMM_WORLD); - + for(int root = 0; root < com_size; root++) { Node n; @@ -840,8 +840,8 @@ TEST(conduit_mpi_test, bcast) EXPECT_EQ(vals_ptr[0], 11); EXPECT_EQ(vals_ptr[1], 22); EXPECT_EQ(vals_ptr[2], 33); - - CONDUIT_INFO("Bcast from root = " + + CONDUIT_INFO("Bcast from root = " << root << "\n" << "rank: " << rank << " res = " << n.to_json()); @@ -866,14 +866,14 @@ TEST(conduit_mpi_test, bcast) int64 val = n["a/b/c/d/e/f"].value(); EXPECT_EQ(val, 10); - - CONDUIT_INFO("Bcast from root = " + + CONDUIT_INFO("Bcast from root = " << root << "\n" << "rank: " << rank << " res = " << n.to_json()); } - - + + for(int root = 0; root < com_size; root++) { Node n; @@ -892,23 +892,23 @@ TEST(conduit_mpi_test, bcast) std::string val = n["a/b/c/d/e/f"].as_string(); EXPECT_EQ(val, "g"); - - CONDUIT_INFO("Bcast from root = " + + CONDUIT_INFO("Bcast from root = " << root << "\n" << "rank: " << rank << " res = " << n.to_json()); } - + } //----------------------------------------------------------------------------- -TEST(conduit_mpi_test, bcast_using_schema) +TEST(conduit_mpi_test, bcast_using_schema) { int rank = mpi::rank(MPI_COMM_WORLD); int com_size = mpi::size(MPI_COMM_WORLD); - + for(int root = 0; root < com_size; root++) { Node n; @@ -919,7 +919,7 @@ TEST(conduit_mpi_test, bcast_using_schema) vals.push_back(11); vals.push_back(22); vals.push_back(33); - + n.set_external(vals); } @@ -930,8 +930,8 @@ TEST(conduit_mpi_test, bcast_using_schema) EXPECT_EQ(vals_ptr[0], 11); EXPECT_EQ(vals_ptr[1], 22); EXPECT_EQ(vals_ptr[2], 33); - - CONDUIT_INFO("Bcast from root = " + + CONDUIT_INFO("Bcast from root = " << root << "\n" << "rank: " << rank << " res = " << n.to_json()); @@ -952,8 +952,8 @@ TEST(conduit_mpi_test, bcast_using_schema) int64 val = n["a/b/c/d/e/f"].value(); EXPECT_EQ(val, 10); - - CONDUIT_INFO("Bcast from root = " + + CONDUIT_INFO("Bcast from root = " << root << "\n" << "rank: " << rank << " res = " << n.to_json()); @@ -974,14 +974,14 @@ TEST(conduit_mpi_test, bcast_using_schema) std::string val = n["a/b/c/d/e/f"].as_string(); EXPECT_EQ(val, "g"); - - CONDUIT_INFO("Bcast from root = " + + CONDUIT_INFO("Bcast from root = " << root << "\n" << "rank: " << rank << " res = " << n.to_json()); } - - + + for(int root = 0; root < com_size; root++) { Node n; @@ -1001,28 +1001,28 @@ TEST(conduit_mpi_test, bcast_using_schema) int val_a = n["value/a"].to_int(); int val_b = n["value/b"].to_int(); - + EXPECT_EQ(val_a, 1); EXPECT_EQ(val_b, 2); - - CONDUIT_INFO("Bcast from root = " + + CONDUIT_INFO("Bcast from root = " << root << "\n" << "rank: " << rank << " res = " << n.to_json()); } - - + + } //----------------------------------------------------------------------------- -TEST(conduit_mpi_test, bcast_using_schema_non_empty_node) +TEST(conduit_mpi_test, bcast_using_schema_non_empty_node) { int rank = mpi::rank(MPI_COMM_WORLD); int com_size = mpi::size(MPI_COMM_WORLD); - + for(int root = 0; root < com_size; root++) { Node n; @@ -1033,7 +1033,7 @@ TEST(conduit_mpi_test, bcast_using_schema_non_empty_node) vals.push_back(11); vals.push_back(22); vals.push_back(33); - + 
n["here"].set(vals); } else @@ -1048,8 +1048,8 @@ TEST(conduit_mpi_test, bcast_using_schema_non_empty_node) EXPECT_EQ(vals_ptr[0], 11); EXPECT_EQ(vals_ptr[1], 22); EXPECT_EQ(vals_ptr[2], 33); - - CONDUIT_INFO("Bcast from root = " + + CONDUIT_INFO("Bcast from root = " << root << "\n" << "rank: " << rank << " res = " << n.to_json());