Merge pull request #1224 from LLNL/bugfix/whitlock/partition_skip_irrelevant_adjsets

Add build_adjsets option to partitioner so user can skip adjset creation.
BradWhitlock committed Jan 8, 2024
2 parents 8934836 + 56eb427 commit bed407b
Showing 5 changed files with 217 additions and 41 deletions.
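
As a quick usage sketch (not part of the diff itself): the new option is passed through the partitioner's options node alongside existing options such as mapping and merge_tolerance. The example below assumes the serial conduit::blueprint::mesh::partition() entry point and the braid example mesh; the target value and the mesh are purely illustrative, and only build_adjsets comes from this commit.

#include "conduit.hpp"
#include "conduit_blueprint.hpp"
#include "conduit_blueprint_mesh_examples.hpp"

int main()
{
    // Build a small example mesh to partition (illustrative only).
    conduit::Node mesh;
    conduit::blueprint::mesh::examples::braid("quads", 10, 10, 0, mesh);

    // Partition into 4 domains. Setting build_adjsets to 0 skips adjset
    // creation in the output, even if the selected topology has adjsets.
    conduit::Node options;
    options["target"] = 4;
    options["build_adjsets"] = 0;

    conduit::Node output;
    conduit::blueprint::mesh::partition(mesh, options, output);
    return 0;
}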
5 changes: 5 additions & 0 deletions src/docs/sphinx/blueprint_mesh_partition.rst
@@ -113,6 +113,11 @@ count domains are combined first.
| | value turns it on and a zero value turns| |
| | it off. | |
+------------------+-----------------------------------------+------------------------------------------+
| build_adjsets | An integer that determines whether | .. code:: yaml |
| | the partitioner should build adjsets, | |
| | if they are present in the selected | build_adjsets: 1 |
| | topology. | |
+------------------+-----------------------------------------+------------------------------------------+
| merge_tolerance | A double value that indicates the max | .. code:: yaml |
| | allowable distance between 2 points | |
| | before they are considered to be | merge_tolerance: 0.000001 |
59 changes: 21 additions & 38 deletions src/libs/blueprint/conduit_blueprint_mesh_partition.cpp
Expand Up @@ -1701,6 +1701,7 @@ Partitioner::Partitioner()
selections(),
selected_fields(),
mapping(true),
build_adjsets(true),
merge_tolerance(1.e-8)
{
}
@@ -1979,6 +1980,11 @@ Partitioner::initialize(const conduit::Node &n_mesh,
if(options.has_child("mapping"))
mapping = options["mapping"].to_unsigned_int() != 0;

// Get whether we want to build adjsets if they are present. This option
// lets us ignore them.
if(options.has_child("build_adjsets"))
build_adjsets = options["build_adjsets"].to_unsigned_int() != 0;

// Get the merge tolerance used when combining points.
if(options.has_child("merge_tolerance"))
merge_tolerance = options["merge_tolerance"].to_double();
@@ -2767,41 +2773,7 @@ Partitioner::copy_field(const conduit::Node &n_field,

const conduit::Node &n_values = n_field["values"];
conduit::Node &new_values = n_new_field["values"];
if(n_values.dtype().is_compact())
{
if(n_values.number_of_children() > 0)
{

// The vel data must be interleaved. We need to use the DataArray element methods for access.
// mcarray.
for(index_t i = 0; i < n_values.number_of_children(); i++)
{
const conduit::Node &n_vals = n_values[i];
conduit::blueprint::mesh::utils::slice_array(n_vals, ids, new_values[n_vals.name()]);
}
}
else
conduit::blueprint::mesh::utils::slice_array(n_values, ids, new_values);
}
else
{
// otherwise, we need to compact our data first
conduit::Node n;
n_values.compact_to(n);
if(n.number_of_children() > 0)
{
// mcarray.
for(index_t i = 0; i < n.number_of_children(); i++)
{
const conduit::Node &n_vals = n[i];
conduit::blueprint::mesh::utils::slice_array(n_vals, ids, new_values[n_vals.name()]);
}
}
else
conduit::blueprint::mesh::utils::slice_array(n, ids, new_values);
}
conduit::blueprint::mesh::utils::slice_field(n_values, ids, new_values);
}

//---------------------------------------------------------------------------
@@ -4324,9 +4296,20 @@ Partitioner::execute(conduit::Node &output)
std::vector<int> dest_rank, dest_domain, offsets;
map_chunks(chunks, dest_rank, dest_domain, offsets);

init_chunk_adjsets(chunk_assoc_aset, adjset_data);
build_interdomain_adjsets(offsets, domain_to_chunk_map, domain_id_to_node, adjset_data);
build_intradomain_adjsets(offsets, domain_to_chunk_map, adjset_data);
// It is possible that this topology has no associated adjset. If that is true
// then the adjset_data will all be nullptr. We skip adjset construction in
// that case since some of the routines iterate over all adjsets, even when
// they may not apply. We also skip adjset creation if the user turned it off
// in the options.
size_t nullCount = 0;
for(const auto &value : adjset_data)
nullCount += (value == nullptr) ? 1 : 0;
if(build_adjsets && nullCount < adjset_data.size())
{
init_chunk_adjsets(chunk_assoc_aset, adjset_data);
build_interdomain_adjsets(offsets, domain_to_chunk_map, domain_id_to_node, adjset_data);
build_intradomain_adjsets(offsets, domain_to_chunk_map, adjset_data);
}

// Communicate chunks to the right destination ranks
std::vector<Chunk> chunks_to_assemble;
1 change: 1 addition & 0 deletions src/libs/blueprint/conduit_blueprint_mesh_partition.hpp
@@ -740,6 +740,7 @@ class CONDUIT_BLUEPRINT_API Partitioner
std::vector<std::shared_ptr<Selection> > selections;
std::vector<std::string> selected_fields;
bool mapping;
bool build_adjsets;
double merge_tolerance;
};

11 changes: 8 additions & 3 deletions src/libs/blueprint/conduit_blueprint_mesh_utils.cpp
@@ -379,9 +379,9 @@ find_domain_id(const Node &node)
// @brief Slice the n_src array using the indices stored in ids. We use the
// array classes for their [] operators that deal with interleaved
// and non-interleaved arrays.
template <typename T, typename IndexType>
template <typename ArrayType, typename IndexType>
inline void
typed_slice_array(const T &src, const std::vector<IndexType> &ids, T &dest)
typed_slice_array(const ArrayType &src, const std::vector<IndexType> &ids, ArrayType &dest)
{
size_t n = ids.size();
for(size_t i = 0; i < n; i++)
@@ -402,6 +402,11 @@ slice_array_internal(const conduit::Node &n_src_values,
// before copying it in so assigning to n_dest_values triggers a memory
// allocation.
auto dt = n_src_values.dtype();

// Make sure the destination node is reset so the node will get the
// right dtype when we reinitialize it below.
n_dest_values.reset();
// Allocate the new data.
n_dest_values = DataType(n_src_values.dtype().id(), ids.size());

// Do the slice.
@@ -564,7 +569,7 @@ slice_field_internal(const conduit::Node &n_src_values,
{
if(n_src_values.number_of_children() > 0)
{
// Reorder an mcarray
// Slice an mcarray
for(conduit::index_t ci = 0; ci < n_src_values.number_of_children(); ci++)
{
const conduit::Node &comp = n_src_values[ci];
182 changes: 182 additions & 0 deletions src/tests/blueprint/t_blueprint_mesh_utils.cpp
@@ -16,6 +16,7 @@

#include "blueprint_test_helpers.hpp"

#include <cstddef>
#include <algorithm>
#include <vector>
#include <string>
@@ -429,6 +430,187 @@ TEST(conduit_blueprint_mesh_utils, copy_fields)
EXPECT_EQ(fields[0].name(), "f4");
}

//-----------------------------------------------------------------------------
TEST(conduit_blueprint_mesh_utils, slice_array)
{
struct A
{
double x,y,z;
double f1,f2;
};
A data[] = {{0., 1., 2., 3., 4.},
{5., 6., 7., 8., 9.},
{10., 11., 12., 13., 14.},
{15., 16., 17., 18., 19.},
{20., 21., 22., 23., 24.}};
index_t nelem = 5;
Node n;
n["fields/vector/values/x"].set_external(reinterpret_cast<double *>(data), nelem, offsetof(A, x), sizeof(A));
n["fields/vector/values/y"].set_external(reinterpret_cast<double *>(data), nelem, offsetof(A, y), sizeof(A));
n["fields/vector/values/z"].set_external(reinterpret_cast<double *>(data), nelem, offsetof(A, z), sizeof(A));
n["fields/f1/values"].set_external(reinterpret_cast<double *>(data), nelem, offsetof(A, f1), sizeof(A));
n["fields/f2/values"].set_external(reinterpret_cast<double *>(data), nelem, offsetof(A, f2), sizeof(A));

// First, a basic check.
auto ax = n["fields/vector/values/x"].as_double_array();
auto ay = n["fields/vector/values/y"].as_double_array();
auto az = n["fields/vector/values/z"].as_double_array();
auto af1 = n["fields/f1/values"].as_double_array();
auto af2 = n["fields/f2/values"].as_double_array();
for(index_t i = 0; i < nelem; i++)
{
EXPECT_EQ(data[i].x, ax[i]);
EXPECT_EQ(data[i].y, ay[i]);
EXPECT_EQ(data[i].z, az[i]);
EXPECT_EQ(data[i].f1, af1[i]);
EXPECT_EQ(data[i].f2, af2[i]);
}

// Check the compacted values.
Node ncx, ncy, ncz, ncf1, ncf2;
n["fields/vector/values/x"].compact_to(ncx);
n["fields/vector/values/y"].compact_to(ncy);
n["fields/vector/values/z"].compact_to(ncz);
n["fields/f1/values"].compact_to(ncf1);
n["fields/f2/values"].compact_to(ncf2);
ax = ncx.as_double_array();
ay = ncy.as_double_array();
az = ncz.as_double_array();
af1 = ncf1.as_double_array();
af2 = ncf2.as_double_array();
for(index_t i = 0; i < nelem; i++)
{
EXPECT_EQ(data[i].x, ax[i]);
EXPECT_EQ(data[i].y, ay[i]);
EXPECT_EQ(data[i].z, az[i]);
EXPECT_EQ(data[i].f1, af1[i]);
EXPECT_EQ(data[i].f2, af2[i]);
}

// Slice the arrays with index_t indices
std::vector<index_t> idx1{0, 2, 4};
Node nsx, nsy, nsz, nsf1, nsf2;
conduit::blueprint::mesh::utils::slice_array(n["fields/vector/values/x"], idx1, nsx);
conduit::blueprint::mesh::utils::slice_array(n["fields/vector/values/y"], idx1, nsy);
conduit::blueprint::mesh::utils::slice_array(n["fields/vector/values/z"], idx1, nsz);
conduit::blueprint::mesh::utils::slice_array(n["fields/f1/values"], idx1, nsf1);
conduit::blueprint::mesh::utils::slice_array(n["fields/f2/values"], idx1, nsf2);
ax = nsx.as_double_array();
ay = nsy.as_double_array();
az = nsz.as_double_array();
af1 = nsf1.as_double_array();
af2 = nsf2.as_double_array();
EXPECT_EQ(static_cast<int>(af1.number_of_elements()), static_cast<int>(idx1.size()));
EXPECT_EQ(static_cast<int>(ax.number_of_elements()), static_cast<int>(idx1.size()));
for(size_t i = 0; i < idx1.size(); i++)
{
index_t orig = idx1[i];
EXPECT_EQ(data[orig].x, ax[i]);
EXPECT_EQ(data[orig].y, ay[i]);
EXPECT_EQ(data[orig].z, az[i]);
EXPECT_EQ(data[orig].f1, af1[i]);
EXPECT_EQ(data[orig].f2, af2[i]);
}

// Slice the arrays with int indices
std::vector<int> idx2{1, 3};
conduit::blueprint::mesh::utils::slice_array(n["fields/vector/values/x"], idx2, nsx);
conduit::blueprint::mesh::utils::slice_array(n["fields/vector/values/y"], idx2, nsy);
conduit::blueprint::mesh::utils::slice_array(n["fields/vector/values/z"], idx2, nsz);
conduit::blueprint::mesh::utils::slice_array(n["fields/f1/values"], idx2, nsf1);
conduit::blueprint::mesh::utils::slice_array(n["fields/f2/values"], idx2, nsf2);
ax = nsx.as_double_array();
ay = nsy.as_double_array();
az = nsz.as_double_array();
af1 = nsf1.as_double_array();
af2 = nsf2.as_double_array();
EXPECT_EQ(static_cast<int>(af1.number_of_elements()), static_cast<int>(idx2.size()));
EXPECT_EQ(static_cast<int>(ax.number_of_elements()), static_cast<int>(idx2.size()));
for(size_t i = 0; i < idx2.size(); i++)
{
index_t orig = idx2[i];
EXPECT_EQ(data[orig].x, ax[i]);
EXPECT_EQ(data[orig].y, ay[i]);
EXPECT_EQ(data[orig].z, az[i]);
EXPECT_EQ(data[orig].f1, af1[i]);
EXPECT_EQ(data[orig].f2, af2[i]);
}
}

//-----------------------------------------------------------------------------
TEST(conduit_blueprint_mesh_utils, slice_field)
{
struct A
{
double x,y,z;
int f1;
float f2;
};
A data[] = {{0., 1., 2., 3, 4.f},
{5., 6., 7., 8, 9.f},
{10., 11., 12., 13, 14.f},
{15., 16., 17., 18, 19.f},
{20., 21., 22., 23, 24.f}};
index_t nelem = 5;
Node n;
n["fields/vector/association"] = "element";
n["fields/vector/topology"] = "ignored";
n["fields/vector/values/x"].set_external(reinterpret_cast<double *>(data), nelem, offsetof(A, x), sizeof(A));
n["fields/vector/values/y"].set_external(reinterpret_cast<double *>(data), nelem, offsetof(A, y), sizeof(A));
n["fields/vector/values/z"].set_external(reinterpret_cast<double *>(data), nelem, offsetof(A, z), sizeof(A));
n["fields/f1/association"] = "element";
n["fields/f1/topology"] = "ignored";
n["fields/f1/values"].set_external(reinterpret_cast<int *>(data), nelem, offsetof(A, f1), sizeof(A));
n["fields/f2/association"] = "element";
n["fields/f2/topology"] = "ignored";
n["fields/f2/values"].set_external(reinterpret_cast<float *>(data), nelem, offsetof(A, f2), sizeof(A));

// Slice the arrays with index_t indices
std::vector<index_t> idx1{0, 2, 4};
Node nsvector, nsf1, nsf2;
conduit::blueprint::mesh::utils::slice_field(n["fields/vector/values"], idx1, nsvector["values"]);
conduit::blueprint::mesh::utils::slice_field(n["fields/f1/values"], idx1, nsf1["values"]);
conduit::blueprint::mesh::utils::slice_field(n["fields/f2/values"], idx1, nsf2["values"]);
auto ax = nsvector["values/x"].as_double_array();
auto ay = nsvector["values/y"].as_double_array();
auto az = nsvector["values/z"].as_double_array();
auto af1 = nsf1["values"].as_int_array();
auto af2 = nsf2["values"].as_float_array();
EXPECT_EQ(static_cast<int>(af1.number_of_elements()), static_cast<int>(idx1.size()));
EXPECT_EQ(static_cast<int>(ax.number_of_elements()), static_cast<int>(idx1.size()));
for(size_t i = 0; i < idx1.size(); i++)
{
index_t orig = idx1[i];
EXPECT_EQ(data[orig].x, ax[i]);
EXPECT_EQ(data[orig].y, ay[i]);
EXPECT_EQ(data[orig].z, az[i]);
EXPECT_EQ(data[orig].f1, af1[i]);
EXPECT_EQ(data[orig].f2, af2[i]);
}

// Slice the arrays with int indices
std::vector<int> idx2{1, 3};
conduit::blueprint::mesh::utils::slice_field(n["fields/vector/values"], idx2, nsvector["values"]);
conduit::blueprint::mesh::utils::slice_field(n["fields/f1/values"], idx2, nsf1["values"]);
conduit::blueprint::mesh::utils::slice_field(n["fields/f2/values"], idx2, nsf2["values"]);
auto bx = nsvector["values/x"].as_double_array();
auto by = nsvector["values/y"].as_double_array();
auto bz = nsvector["values/z"].as_double_array();
auto bf1 = nsf1["values"].as_int_array();
auto bf2 = nsf2["values"].as_float_array();
EXPECT_EQ(static_cast<int>(bf1.number_of_elements()), static_cast<int>(idx2.size()));
EXPECT_EQ(static_cast<int>(bx.number_of_elements()), static_cast<int>(idx2.size()));
for(size_t i = 0; i < idx2.size(); i++)
{
index_t orig = idx2[i];
EXPECT_EQ(data[orig].x, bx[i]);
EXPECT_EQ(data[orig].y, by[i]);
EXPECT_EQ(data[orig].z, bz[i]);
EXPECT_EQ(data[orig].f1, bf1[i]);
EXPECT_EQ(data[orig].f2, bf2[i]);
}
}

//-----------------------------------------------------------------------------
TEST(conduit_blueprint_mesh_utils, adjset_compare_pointwise_2d)
{
