adding raja derived field #1161
@@ -153,6 +153,77 @@ conduit::Node dispatch_memory(const conduit::Node &field,
  return res;
}

//dispatch memory for a derived field (DF)
template<typename Function, typename Exec>
conduit::Node dispatch_memory_DF(const conduit::Node &l_field,
                                 const conduit::Node &r_field,
                                 std::string component,
                                 const Function &func,
                                 const Exec &exec)
{
  const std::string mem_space = Exec::memory_space;

  conduit::Node res;
  if(field_is_float32(l_field))
  {
    if(!field_is_float32(r_field))
      ASCENT_ERROR("Type dispatch: mismatch array types\n"<<
                   l_field.schema().to_string() <<
                   "\n vs. \n" <<
                   r_field.schema().to_string());
    MemoryInterface<conduit::float32> l_farray(l_field);
    MemoryInterface<conduit::float32> r_farray(r_field);
    MemoryAccessor<conduit::float32> l_accessor = l_farray.accessor(mem_space,component);
    MemoryAccessor<conduit::float32> r_accessor = r_farray.accessor(mem_space,component);
    func(l_accessor, r_accessor, res, exec);
  }
  else if(field_is_float64(l_field))
  {
    if(!field_is_float64(r_field))
      ASCENT_ERROR("Type dispatch: mismatch array types\n"<<
                   l_field.schema().to_string() <<
                   "\n vs. \n" <<
                   r_field.schema().to_string());
    MemoryInterface<conduit::float64> l_farray(l_field);
    MemoryInterface<conduit::float64> r_farray(r_field);
    MemoryAccessor<conduit::float64> l_accessor = l_farray.accessor(mem_space,component);
    MemoryAccessor<conduit::float64> r_accessor = r_farray.accessor(mem_space,component);
    func(l_accessor, r_accessor, res, exec);
  }
  else if(field_is_int32(l_field))
  {
    if(!field_is_int32(r_field))
      ASCENT_ERROR("Type dispatch: mismatch array types\n"<<
                   l_field.schema().to_string() <<
                   "\n vs. \n" <<
                   r_field.schema().to_string());
    MemoryInterface<conduit::int32> l_farray(l_field);
    MemoryInterface<conduit::int32> r_farray(r_field);
    MemoryAccessor<conduit::int32> l_accessor = l_farray.accessor(mem_space,component);
    MemoryAccessor<conduit::int32> r_accessor = r_farray.accessor(mem_space,component);
    func(l_accessor, r_accessor, res, exec);
  }
  else if(field_is_int64(l_field))
  {
    if(!field_is_int64(r_field))
      ASCENT_ERROR("Type dispatch: mismatch array types\n"<<
                   l_field.schema().to_string() <<
                   "\n vs. \n" <<
                   r_field.schema().to_string());
    MemoryInterface<conduit::int64> l_farray(l_field);
    MemoryInterface<conduit::int64> r_farray(r_field);
    MemoryAccessor<conduit::int64> l_accessor = l_farray.accessor(mem_space,component);
    MemoryAccessor<conduit::int64> r_accessor = r_farray.accessor(mem_space,component);
    func(l_accessor, r_accessor, res, exec);
  }
  else
  {
    ASCENT_ERROR("Type dispatch: unsupported array type "<<
                 l_field.schema().to_string());
  }
  return res;
}

template<typename Function>
conduit::Node
exec_dispatch(const conduit::Node &field, std::string component, const Function &func)

@@ -195,6 +266,48 @@ exec_dispatch(const conduit::Node &field, std::string component, const Function
  return res;
}

template<typename Function>
conduit::Node
exec_dispatch_DF(const conduit::Node &l_field, const conduit::Node &r_field, std::string component, const Function &func)
{
  conduit::Node res;
  const std::string exec_policy = ExecutionManager::execution_policy();
  //std::cout<<"Exec policy "<<exec_policy<<"\n";
  if(exec_policy == "serial")
  {
    SerialExec exec;
    res = dispatch_memory_DF(l_field, r_field, component, func, exec);
  }
#if defined(ASCENT_OPENMP_ENABLED) && defined(ASCENT_RAJA_ENABLED)
  else if(exec_policy == "openmp")
  {
    OpenMPExec exec;
    res = dispatch_memory_DF(l_field, r_field, component, func, exec);
  }
#endif
#if defined(ASCENT_CUDA_ENABLED)
  else if(exec_policy == "cuda")
  {
    CudaExec exec;
    res = dispatch_memory_DF(l_field, r_field, component, func, exec);
  }
#endif
#if defined(ASCENT_HIP_ENABLED)
  else if(exec_policy == "hip")
  {
    HipExec exec;
    res = dispatch_memory_DF(l_field, r_field, component, func, exec);
  }
#endif
  else
  {
    ASCENT_ERROR("Execution dispatch: unsupported execution policy "<<
                 exec_policy);
  }
  return res;
}

template<typename Function>
conduit::Node
field_dispatch(const conduit::Node &field, const Function &func)

@@ -481,6 +594,67 @@ struct SumFunctor
  }
};

struct DFAddFunctor
{
  template<typename T, typename Exec>
  void operator()(const MemoryAccessor<T> l_accessor,
                  const MemoryAccessor<T> r_accessor,
                  conduit::Node &output,
                  const Exec &) const
  {
    const int l_size = l_accessor.m_size;
    const int r_size = r_accessor.m_size;
    bool diff_sizes = false;
    int size = l_size;
    int max_size = l_size;

    if(l_size != r_size)
    {
      size = std::min(l_size, r_size);
      max_size = std::max(l_size, r_size);
      diff_sizes = true;
    }

    // output buffer sized to the larger input; for device execution this
    // buffer must live in memory the execution space can reach
    // (e.g., unified/shared memory)
    T *values = new T[max_size];

    using for_policy = typename Exec::for_policy;

    // add the overlapping range element-wise
    ascent::forall<for_policy>(0, size, [=] ASCENT_LAMBDA(index_t i)
    {
      values[i] = l_accessor[i] + r_accessor[i];
    });
    ASCENT_DEVICE_ERROR_CHECK();

    if(diff_sizes)
    {
      if(l_size > r_size)
      {
        // copy the left field's trailing values through
        ascent::forall<for_policy>(size, l_size, [=] ASCENT_LAMBDA(index_t i)
        {
          values[i] = l_accessor[i];
        });
        ASCENT_DEVICE_ERROR_CHECK();

[Review thread on the hunk above]
Reviewer: To confirm my understanding: if one field is larger than the other, the output will be sized to the larger field and the remaining values are simply copied?
Author: Correct, that's what I was going for here: the shorter field contributes zero past its end, so what's extra is simply copied (see the host-only sketch after this functor). I figured it's OK to accept fields of different sizes? Or do I need to be concerned about topology further down the pipeline?
Reviewer: Thanks, yes, this makes sense. We aren't likely to hit these cases often because Blueprint fields on the same topology with the same association should share cardinality. But it's much better to handle the logic than to have a real head-scratcher crash down the line.
      }
      else
      {
        // copy the right field's trailing values through
        ascent::forall<for_policy>(size, r_size, [=] ASCENT_LAMBDA(index_t i)
        {
          values[i] = r_accessor[i];
        });
        ASCENT_DEVICE_ERROR_CHECK();
      }
    }

    output["values"].set(values, max_size);
    delete [] values;
  }
};
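
The thread above pins down the size-mismatch rule; here is a self-contained, host-only illustration in plain C++ (the input values are made up, and it deliberately sidesteps the Ascent accessors and execution policies):

#include <algorithm>
#include <cstdio>
#include <vector>

// Overlapping entries are added; trailing entries of the longer input are
// copied through (the shorter input is treated as zero past its end).
int main()
{
  std::vector<double> l = {1.0, 2.0, 3.0, 4.0};
  std::vector<double> r = {10.0, 20.0};

  const size_t size     = std::min(l.size(), r.size());
  const size_t max_size = std::max(l.size(), r.size());
  std::vector<double> out(max_size);

  for(size_t i = 0; i < size; ++i)
  {
    out[i] = l[i] + r[i];
  }
  for(size_t i = size; i < max_size; ++i)
  {
    out[i] = (l.size() > r.size()) ? l[i] : r[i];
  }

  for(double v : out)
  {
    std::printf("%g ", v); // prints: 11 22 3 4
  }
  return 0;
}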

struct NanFunctor
{
  template<typename T, typename Exec>

@@ -742,6 +916,13 @@ array_sum(const conduit::Node &array,

  return res;
}

conduit::Node
derived_field_add_reduction(const conduit::Node &l_field, const conduit::Node &r_field, const std::string &component)
{
  return detail::exec_dispatch_DF(l_field, r_field, component, detail::DFAddFunctor());
}

//-----------------------------------------------------------------------------
};
//-----------------------------------------------------------------------------
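
For context, a hypothetical call site for the new entry point might look like the following. The field layout (values/association/topology) follows Blueprint conventions; the node contents and the empty component string are illustrative assumptions, not part of this diff:

#include <vector>

// Hypothetical usage (not part of this diff): two float64 vertex fields
// defined on the same topology, added element-wise.
conduit::Node l_field, r_field;
l_field["association"] = "vertex";
l_field["topology"]    = "mesh";
l_field["values"].set(std::vector<conduit::float64>({1.0, 2.0, 3.0}));
r_field["association"] = "vertex";
r_field["topology"]    = "mesh";
r_field["values"].set(std::vector<conduit::float64>({4.0, 5.0, 6.0}));

// an empty component name selects the single scalar component
conduit::Node sum = derived_field_add_reduction(l_field, r_field, "");
// expected result: sum["values"] holds {5.0, 7.0, 9.0}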
[Review thread on the pull request]
Author: @cyrush — Blueprint question about getting my data back into my result node: what rules do I need to follow here? Do I need to maintain the domain structure for my output field? If not, am I overwriting my output field with each set? Should I instead loop over all domains, append the new field to a vector, and then set it at the end?
cyrush: In this case, we can't pass a const Node if we want to modify it (add a new field). I think it would be best to return just the resulting field Node and insert it into the Conduit tree at a higher level (in a custom filter). We will also need to check that the associations of the inputs (element or vertex) match and that both are defined on the same topology; that info lives in the field node alongside the values, and the resulting field needs to propagate it.
Author: Ah, OK. This makes sense!
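
Following up on that suggestion, here is a minimal sketch of what the higher-level insertion might look like. The names (add_fields, the "fields/..." paths, the dataset layout) are hypothetical illustrations rather than Ascent API, and a real version would live in a custom expression filter:

// Sketch under the assumptions above: validate that the inputs share
// association and topology, compute the sum per domain, and insert the
// result as a new Blueprint field with the metadata propagated.
conduit::Node
add_fields(const conduit::Node &dataset,
           const std::string &l_name,
           const std::string &r_name,
           const std::string &out_name)
{
  conduit::Node out_dataset;
  out_dataset.set(dataset); // deep copy so the input stays const

  const conduit::index_t num_domains = dataset.number_of_children();
  for(conduit::index_t d = 0; d < num_domains; ++d)
  {
    const conduit::Node &dom = dataset.child(d);
    const conduit::Node &l_field = dom.fetch_existing("fields/" + l_name);
    const conduit::Node &r_field = dom.fetch_existing("fields/" + r_name);

    // inputs must share association (element vs. vertex) and topology
    if(l_field.fetch_existing("association").as_string() !=
       r_field.fetch_existing("association").as_string() ||
       l_field.fetch_existing("topology").as_string() !=
       r_field.fetch_existing("topology").as_string())
    {
      ASCENT_ERROR("derived field: inputs must share association and topology");
    }

    conduit::Node result = derived_field_add_reduction(l_field, r_field, "");

    // propagate the metadata so the result is a valid Blueprint field
    conduit::Node &out_field = out_dataset.child(d)["fields/" + out_name];
    out_field["association"] = l_field.fetch_existing("association");
    out_field["topology"]    = l_field.fetch_existing("topology");
    out_field["values"]      = result["values"];
  }
  return out_dataset;
}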