Skip to content

Commit

Permalink
Enforce units throughout operators and data containers.
Browse files Browse the repository at this point in the history
* Enforce consistent units throughout the detector, pixel, and noise classes.

* Use units when doing map-domain I/O.

* Introduce a new Unit trait type for operators and templates, and support
  this trait type throughout the configuration system.

* Maintain units in the noise model classes, including for detector inverse
  variance weights.

* Check units when assigning to DetectorData objects.

* Propagate units when duplicating and redistributing Observations.

* Do not copy internal interval containing all samples when redistributing
  observations.

* Scale detector data when doing arithmetic with the Combine operator.

* Replace old collective debug logging with `Logger.debug_rank()`.

* Run format_source.sh
  • Loading branch information
tskisner committed Oct 11, 2022
1 parent 68392af commit afa4f10
Show file tree
Hide file tree
Showing 108 changed files with 2,066 additions and 936 deletions.
5 changes: 4 additions & 1 deletion src/toast/_libtoast/accelerator.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@ class OmpManager {
T * device_ptr(T * buffer) {
auto log = toast::Logger::get();
std::ostringstream o;

// If the device is the host device, return
if (device_is_host()) {
return buffer;
Expand All @@ -55,16 +56,18 @@ class OmpManager {
if (n == 0) {
o.str("");
o << "OmpManager: host ptr " << buffer
<< " is not present- cannot get device pointer";
<< " is not present- cannot get device pointer";
log.error(o.str().c_str());
throw std::runtime_error(o.str().c_str());
}
return static_cast <T *> (mem_.at(vbuffer));

#else // ifdef HAVE_OPENMP_TARGET
o << "OmpManager: OpenMP target support disabled";
log.error(o.str().c_str());
throw std::runtime_error(o.str().c_str());
return NULL;

#endif // ifdef HAVE_OPENMP_TARGET
}

Expand Down
18 changes: 9 additions & 9 deletions src/toast/_libtoast/ops_mapmaker_utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -229,15 +229,15 @@ void init_ops_mapmaker_utils(py::module & m) {
)
{
# pragma omp target teams distribute collapse(2) \
is_device_ptr( \
dev_pixels, \
dev_weights, \
dev_det_data, \
dev_det_flags, \
dev_intervals, \
dev_shared_flags, \
dev_zmap \
)
is_device_ptr( \
dev_pixels, \
dev_weights, \
dev_det_data, \
dev_det_flags, \
dev_intervals, \
dev_shared_flags, \
dev_zmap \
)
for (int64_t idet = 0; idet < n_det; idet++) {
for (int64_t iview = 0; iview < n_view; iview++) {
# pragma omp parallel for default(shared)
Expand Down
24 changes: 12 additions & 12 deletions src/toast/_libtoast/ops_pixels_healpix.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -420,12 +420,12 @@ void init_ops_pixels_healpix(py::module & m) {
hpix_init(&hp, nside);
if (nest) {
# pragma omp target teams distribute collapse(2) \
is_device_ptr( \
dev_pixels, \
dev_quats, \
dev_flags, \
dev_intervals \
)
is_device_ptr( \
dev_pixels, \
dev_quats, \
dev_flags, \
dev_intervals \
)
for (int64_t idet = 0; idet < n_det; idet++) {
for (int64_t iview = 0; iview < n_view; iview++) {
# pragma omp parallel for default(shared)
Expand Down Expand Up @@ -454,12 +454,12 @@ void init_ops_pixels_healpix(py::module & m) {
}
} else {
# pragma omp target teams distribute collapse(2) \
is_device_ptr( \
dev_pixels, \
dev_quats, \
dev_flags, \
dev_intervals \
)
is_device_ptr( \
dev_pixels, \
dev_quats, \
dev_flags, \
dev_intervals \
)
for (int64_t idet = 0; idet < n_det; idet++) {
for (int64_t iview = 0; iview < n_view; iview++) {
# pragma omp parallel for default(shared)
Expand Down
13 changes: 7 additions & 6 deletions src/toast/_libtoast/ops_pointing_detector.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,7 @@ void init_ops_pointing_detector(py::module & m) {
auto & omgr = OmpManager::get();
int dev = omgr.get_device();
bool offload = (!omgr.device_is_host()) && use_accel;

// What if quats has more dets than we are considering in quat_index?

// This is used to return the actual shape of each buffer
Expand Down Expand Up @@ -152,12 +153,12 @@ void init_ops_pointing_detector(py::module & m) {
)
{
# pragma omp target teams distribute collapse(2) \
is_device_ptr( \
dev_boresight, \
dev_quats, \
dev_flags, \
dev_intervals \
)
is_device_ptr( \
dev_boresight, \
dev_quats, \
dev_flags, \
dev_intervals \
)
for (int64_t idet = 0; idet < n_det; idet++) {
for (int64_t iview = 0; iview < n_view; iview++) {
# pragma omp parallel for default(shared)
Expand Down
20 changes: 10 additions & 10 deletions src/toast/_libtoast/ops_stokes_weights.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -180,12 +180,12 @@ void init_ops_stokes_weights(py::module & m) {
)
{
# pragma omp target teams distribute collapse(2) \
is_device_ptr( \
dev_weights, \
dev_quats, \
dev_hwp, \
dev_intervals \
)
is_device_ptr( \
dev_weights, \
dev_quats, \
dev_hwp, \
dev_intervals \
)
for (int64_t idet = 0; idet < n_det; idet++) {
for (int64_t iview = 0; iview < n_view; iview++) {
# pragma omp parallel for default(shared)
Expand Down Expand Up @@ -292,10 +292,10 @@ void init_ops_stokes_weights(py::module & m) {
)
{
# pragma omp target teams distribute collapse(2) \
is_device_ptr( \
dev_weights, \
dev_intervals \
)
is_device_ptr( \
dev_weights, \
dev_intervals \
)
for (int64_t idet = 0; idet < n_det; idet++) {
for (int64_t iview = 0; iview < n_view; iview++) {
# pragma omp parallel for default(shared)
Expand Down
32 changes: 16 additions & 16 deletions src/toast/_libtoast/template_offset.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -71,11 +71,11 @@ void init_template_offset(py::module & m) {
{
int64_t offset = amp_offset;
# pragma omp target teams distribute firstprivate(offset) \
is_device_ptr( \
dev_amplitudes, \
dev_det_data, \
dev_intervals \
)
is_device_ptr( \
dev_amplitudes, \
dev_det_data, \
dev_intervals \
)
for (int64_t iview = 0; iview < n_view; iview++) {
# pragma omp parallel for default(shared)
for (
Expand Down Expand Up @@ -185,12 +185,12 @@ void init_template_offset(py::module & m) {
{
int64_t offset = amp_offset;
# pragma omp target teams distribute firstprivate(offset) \
is_device_ptr( \
dev_amplitudes, \
dev_det_data, \
dev_det_flags, \
dev_intervals \
)
is_device_ptr( \
dev_amplitudes, \
dev_det_data, \
dev_det_flags, \
dev_intervals \
)
for (int64_t iview = 0; iview < n_view; iview++) {
# pragma omp parallel for default(shared)
for (
Expand Down Expand Up @@ -289,11 +289,11 @@ void init_template_offset(py::module & m) {
)
{
# pragma omp target \
is_device_ptr( \
dev_amp_in, \
dev_amp_out, \
dev_offset_var \
)
is_device_ptr( \
dev_amp_in, \
dev_amp_out, \
dev_offset_var \
)
{
# pragma omp parallel for default(shared)
for (int64_t iamp = 0; iamp < n_amp; iamp++) {
Expand Down
33 changes: 18 additions & 15 deletions src/toast/_libtoast/tod_filter.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -130,13 +130,16 @@ void init_tod_filter(py::module & m) {
size_t stop_order) {
pybuffer_check_1D <double> (angle);

std::vector <int64_t> temp_shape(2);
std::vector <int64_t> temp_shape(2);

const double * pangle = extract_buffer <double> (angle, "angle", 1, temp_shape, {-1});
int64_t nsample = temp_shape[0];
const double * pangle =
extract_buffer <double> (angle, "angle", 1, temp_shape, {-1});
int64_t nsample = temp_shape[0];

double * ptemplates = extract_buffer <double> (templates, "templates", 2, temp_shape, {-1, nsample});
int64_t ntemplate = temp_shape[0];
double * ptemplates =
extract_buffer <double> (templates, "templates", 2, temp_shape,
{-1, nsample});
int64_t ntemplate = temp_shape[0];

if (ntemplate != 2 * (stop_order - start_order)) {
auto log = toast::Logger::get();
Expand All @@ -146,16 +149,16 @@ void init_tod_filter(py::module & m) {
throw std::runtime_error(o.str().c_str());
}

# pragma omp parallel for schedule(static, 1)
for (auto order = start_order; order < stop_order; ++order) {
size_t offset = 2 * (order - start_order) * nsample;
for (int64_t i=0; i < nsample; ++i) {
ptemplates[offset++] = cos(order * pangle[i]);
}
for (int64_t i=0; i < nsample; ++i) {
ptemplates[offset++] = sin(order * pangle[i]);
}
}
#pragma omp parallel for schedule(static, 1)
for (auto order = start_order; order < stop_order; ++order) {
size_t offset = 2 * (order - start_order) * nsample;
for (int64_t i = 0; i < nsample; ++i) {
ptemplates[offset++] = cos(order * pangle[i]);
}
for (int64_t i = 0; i < nsample; ++i) {
ptemplates[offset++] = sin(order * pangle[i]);
}
}

return;
}, py::arg("angle"), py::arg("templates"), py::arg("start_order"),
Expand Down
Loading

0 comments on commit afa4f10

Please sign in to comment.