-
Notifications
You must be signed in to change notification settings - Fork 62
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
adding raja derived field #1161
Changes from 11 commits
7a74672
24e8e42
51368da
ce6052e
2b31b68
681fd81
21d9ba1
44f14f0
f954f78
9a9dcde
89a6e9a
b3bf81c
7d85bf5
0e3b6eb
da92297
fe02c7d
c2ee648
e947135
28c22e5
a1bf125
e7ae4cd
46f74f9
716ef54
ef5449c
07f13ad
05cea95
1a48615
73ffef9
fba3451
53c2208
0581ee7
24ac645
f123f6a
b7e6bd0
9222dea
d93b340
d4b3aa2
1e977d5
6cd6d7a
728a977
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -153,6 +153,77 @@ conduit::Node dispatch_memory(const conduit::Node &field, | |
return res; | ||
} | ||
|
||
//dispatch memory for a derived field (DF) | ||
template<typename Function, typename Exec> | ||
conduit::Node dispatch_memory_DF(const conduit::Node &l_field, | ||
const conduit::Node &r_field, | ||
std::string component, | ||
const Function &func, | ||
const Exec &exec) | ||
{ | ||
const std::string mem_space = Exec::memory_space; | ||
|
||
conduit::Node res; | ||
if(field_is_float32(l_field)) | ||
{ | ||
if(!field_is_float32(r_field)) | ||
ASCENT_ERROR("Type dispatch: mismatch array types\n"<< | ||
l_field.schema().to_string() << | ||
"\n vs. \n" << | ||
r_field.schema().to_string()); | ||
MemoryInterface<conduit::float32> l_farray(l_field); | ||
MemoryInterface<conduit::float32> r_farray(r_field); | ||
MemoryAccessor<conduit::float32> l_accessor = l_farray.accessor(mem_space,component); | ||
MemoryAccessor<conduit::float32> r_accessor = r_farray.accessor(mem_space,component); | ||
res = func(l_accessor, r_accessor, exec); | ||
} | ||
else if(field_is_float64(l_field)) | ||
{ | ||
if(!field_is_float64(r_field)) | ||
ASCENT_ERROR("Type dispatch: mismatch array types\n"<< | ||
l_field.schema().to_string() << | ||
"\n vs. \n" << | ||
r_field.schema().to_string()); | ||
MemoryInterface<conduit::float64> l_farray(l_field); | ||
MemoryInterface<conduit::float64> r_farray(r_field); | ||
MemoryAccessor<conduit::float64> l_accessor = l_farray.accessor(mem_space,component); | ||
MemoryAccessor<conduit::float64> r_accessor = r_farray.accessor(mem_space,component); | ||
res = func(l_accessor, r_accessor, exec); | ||
} | ||
else if(field_is_int32(l_field)) | ||
{ | ||
if(!field_is_int32(r_field)) | ||
ASCENT_ERROR("Type dispatch: mismatch array types\n"<< | ||
l_field.schema().to_string() << | ||
"\n vs. \n" << | ||
r_field.schema().to_string()); | ||
MemoryInterface<conduit::int32> l_farray(l_field); | ||
MemoryInterface<conduit::int32> r_farray(r_field); | ||
MemoryAccessor<conduit::int32> l_accessor = l_farray.accessor(mem_space,component); | ||
MemoryAccessor<conduit::int32> r_accessor = r_farray.accessor(mem_space,component); | ||
res = func(l_accessor, r_accessor, exec); | ||
} | ||
else if(field_is_int64(l_field)) | ||
{ | ||
if(!field_is_int64(r_field)) | ||
ASCENT_ERROR("Type dispatch: mismatch array types\n"<< | ||
l_field.schema().to_string() << | ||
"\n vs. \n" << | ||
r_field.schema().to_string()); | ||
MemoryInterface<conduit::int64> l_farray(l_field); | ||
MemoryInterface<conduit::int64> r_farray(r_field); | ||
MemoryAccessor<conduit::int64> l_accessor = l_farray.accessor(mem_space,component); | ||
MemoryAccessor<conduit::int64> r_accessor = r_farray.accessor(mem_space,component); | ||
res = func(l_accessor, r_accessor, exec); | ||
} | ||
else | ||
{ | ||
ASCENT_ERROR("Type dispatch: unsupported array type "<< | ||
l_field.schema().to_string()); | ||
} | ||
return res; | ||
} | ||
|
||
template<typename Function> | ||
conduit::Node | ||
exec_dispatch(const conduit::Node &field, std::string component, const Function &func) | ||
|
@@ -195,6 +266,48 @@ exec_dispatch(const conduit::Node &field, std::string component, const Function | |
return res; | ||
} | ||
|
||
template<typename Function> | ||
conduit::Node | ||
exec_dispatch_DF(const conduit::Node &l_field, const conduit::Node &r_field, std::string component, const Function &func) | ||
{ | ||
|
||
conduit::Node res; | ||
const std::string exec_policy = ExecutionManager::execution_policy(); | ||
//std::cout<<"Exec policy "<<exec_policy<<"\n"; | ||
if(exec_policy == "serial") | ||
{ | ||
SerialExec exec; | ||
res = dispatch_memory_DF(l_field, r_field, component, func, exec); | ||
} | ||
#if defined(ASCENT_OPENMP_ENABLED) && defined(ASCENT_RAJA_ENABLED) | ||
else if(exec_policy == "openmp") | ||
{ | ||
OpenMPExec exec; | ||
res = dispatch_memory_DF(l_field, r_field, component, func, exec); | ||
} | ||
#endif | ||
#if defined(ASCENT_CUDA_ENABLED) | ||
else if(exec_policy == "cuda") | ||
{ | ||
CudaExec exec; | ||
res = dispatch_memory_DF(l_field, r_field, component, func, exec); | ||
} | ||
#endif | ||
#if defined(ASCENT_HIP_ENABLED) | ||
else if(exec_policy == "hip") | ||
{ | ||
HipExec exec; | ||
res = dispatch_memory_DF(l_field, r_field, component, func, exec); | ||
} | ||
#endif | ||
else | ||
{ | ||
ASCENT_ERROR("Execution dispatch: unsupported execution policy "<< | ||
exec_policy); | ||
} | ||
return res; | ||
} | ||
|
||
template<typename Function> | ||
conduit::Node | ||
field_dispatch(const conduit::Node &field, const Function &func) | ||
|
@@ -481,6 +594,75 @@ struct SumFunctor | |
} | ||
}; | ||
|
||
struct DFAddFunctor | ||
{ | ||
template<typename T, typename Exec> | ||
conduit::Node operator()(const MemoryAccessor<T> l_accessor, | ||
const MemoryAccessor<T> r_accessor, | ||
const Exec &) const | ||
{ | ||
const int l_size = l_accessor.m_size; | ||
const int r_size = r_accessor.m_size; | ||
bool diff_sizes = false; | ||
int size; | ||
int max_size; | ||
|
||
size = max_size = l_size; | ||
if(l_size != r_size) | ||
{ | ||
size = min(l_size, r_size); | ||
max_size = max(l_size, r_size); | ||
diff_sizes = true; | ||
} | ||
|
||
|
||
// conduit zero initializes this array | ||
conduit::Node res; | ||
res["values"].set(conduit::DataType::float64(max_size)); | ||
double *res_array = res["values"].value(); | ||
|
||
Array<double> field_sums(res_array, max_size); | ||
|
||
double *sums_ptr = field_sums.get_ptr(Exec::memory_space); | ||
|
||
using for_policy = typename Exec::for_policy; | ||
|
||
ascent::forall<for_policy>(0, size, [=] ASCENT_LAMBDA(index_t i) | ||
{ | ||
const T val = l_accessor[i] + r_accessor[i]; | ||
sums_ptr[i] = val; | ||
}); | ||
ASCENT_DEVICE_ERROR_CHECK(); | ||
|
||
if(diff_sizes) | ||
{ | ||
if(l_size > r_size) | ||
{ | ||
ascent::forall<for_policy>(size, l_size, [=] ASCENT_LAMBDA(index_t i) | ||
{ | ||
const T val = l_accessor[i]; | ||
sums_ptr[i] = val; | ||
}); | ||
ASCENT_DEVICE_ERROR_CHECK(); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. to confirm my understanding: If one field is larger than another, the output will be sized to the larger field and the remaining vals are simply copied. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Correct. That's what I was going for here, plus zero so simply copied what's extra. I figures it's ok to take them for different sizes? Or do I need to be concerned about topology further down the pipeline? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Thanks, yes this makes sense. We aren't likely to hit these cases often b/c Blueprint fields on the same topology with the same assoc should share cardinality. But its much better to handle the logic vs having a real head scratcher crash down the line. |
||
} | ||
else | ||
{ | ||
ascent::forall<for_policy>(size, r_size, [=] ASCENT_LAMBDA(index_t i) | ||
{ | ||
const T val = r_accessor[i]; | ||
sums_ptr[i] = val; | ||
}); | ||
ASCENT_DEVICE_ERROR_CHECK(); | ||
} | ||
} | ||
|
||
// synch the values back to the host | ||
(void) field_sums.get_host_ptr(); | ||
|
||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. @cyrush My domain is going into this functor with "values", and is coming out with a "device_values" added onto it. Have you seen this before? Am I messing something up here? I was able to add a field to nothing and got correct results (as in the final image was correct, the output also had device_values). But then trying to add two fields together, it is as if it is only considering the second field. Not sure if it's pushing the input to "device_values" and then putting the fresh batch of values as "values"? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Hmm, never mind. looks like device_values gets generated for the device calculations. So most likely unrelated as to why one field is overwriting the other. |
||
return res; | ||
} | ||
}; | ||
|
||
struct NanFunctor | ||
{ | ||
template<typename T, typename Exec> | ||
|
@@ -742,6 +924,13 @@ array_sum(const conduit::Node &array, | |
|
||
return res; | ||
} | ||
|
||
//-----------------------------------------------------------------------------
// Entry point for the derived-field add operation: dispatches the add functor
// over the element type and active execution backend of the two input fields.
conduit::Node
derived_field_add_reduction(const conduit::Node &l_field, const conduit::Node &r_field, const std::string &component)
{
  detail::DFAddFunctor add_op;
  return detail::exec_dispatch_DF(l_field, r_field, component, add_op);
}
|
||
//----------------------------------------------------------------------------- | ||
}; | ||
//----------------------------------------------------------------------------- | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Data is coming out here with device_values