[OpenMP] Add debug checks for divide by zero (#83300)
jpeyton52 committed Mar 12, 2024
1 parent 9ac0315 commit f5334f5
Showing 2 changed files with 22 additions and 3 deletions.
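
The pattern applied throughout is the same: a debug-only assertion is placed immediately before each division whose divisor (the loop increment, the thread count, the team count, or the chunk size) is assumed to be nonzero, so debug builds fail loudly instead of dividing by zero. The sketch below is a minimal standalone illustration of that guarded trip-count computation, not the runtime's code; trip_count_sketch is a hypothetical helper and the standard assert stands in for KMP_DEBUG_ASSERT.

#include <cassert>
#include <cstdint>

// Hypothetical helper mirroring the trip-count computation in
// __kmp_for_static_init: lower/upper are the loop bounds, incr the increment.
std::uint64_t trip_count_sketch(std::int64_t lower, std::int64_t upper,
                                std::int64_t incr) {
  std::uint64_t trip_count;
  if (incr == 1) {
    trip_count = (std::uint64_t)(upper - lower) + 1;
  } else if (incr == -1) {
    trip_count = (std::uint64_t)(lower - upper) + 1;
  } else if (incr > 0) {
    // upper - lower can exceed the range of the signed type
    trip_count = (std::uint64_t)(upper - lower) / (std::uint64_t)incr + 1;
  } else {
    // The commit's guard: trap a zero increment in debug builds rather than
    // falling through to a division by zero.
    assert(incr != 0 && "loop increment must be nonzero");
    trip_count = (std::uint64_t)(lower - upper) / (std::uint64_t)(-incr) + 1;
  }
  return trip_count;
}
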
2 changes: 2 additions & 0 deletions openmp/runtime/src/kmp_affinity.cpp
@@ -1777,6 +1777,8 @@ static bool __kmp_affinity_create_hwloc_map(kmp_i18n_id_t *const msg_id) {
       __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_PU);
     else
       __kmp_nThreadsPerCore = 1; // no CORE found
+    if (__kmp_nThreadsPerCore == 0)
+      __kmp_nThreadsPerCore = 1;
     __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
     if (nCoresPerPkg == 0)
       nCoresPerPkg = 1; // to prevent possible division by 0
23 changes: 20 additions & 3 deletions openmp/runtime/src/kmp_sched.cpp
@@ -52,6 +52,7 @@ char const *traits_t<long>::spec = "ld";
     } else if (i > 0) { \
       t = (u - l) / i + 1; \
     } else { \
+      KMP_DEBUG_ASSERT(i != 0); \
       t = (l - u) / (-i) + 1; \
     } \
     KMP_COUNT_VALUE(stat, t); \
@@ -284,6 +285,7 @@ static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
     // upper-lower can exceed the limit of signed type
     trip_count = (UT)(*pupper - *plower) / incr + 1;
   } else {
+    KMP_DEBUG_ASSERT(incr != 0);
     trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
   }
 
@@ -318,6 +320,7 @@ static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
       if (plastiter != NULL)
         *plastiter = (tid == trip_count - 1);
     } else {
+      KMP_DEBUG_ASSERT(nth != 0);
       if (__kmp_static == kmp_sch_static_balanced) {
         UT small_chunk = trip_count / nth;
         UT extras = trip_count % nth;
@@ -358,6 +361,7 @@ static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
   case kmp_sch_static_chunked: {
     ST span;
     UT nchunks;
+    KMP_DEBUG_ASSERT(chunk != 0);
     if (chunk < 1)
       chunk = 1;
     else if ((UT)chunk > trip_count)
@@ -383,6 +387,7 @@ static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
   }
   case kmp_sch_static_balanced_chunked: {
     T old_upper = *pupper;
+    KMP_DEBUG_ASSERT(nth != 0);
     // round up to make sure the chunk is enough to cover all iterations
     UT span = (trip_count + nth - 1) / nth;
 
@@ -398,8 +403,10 @@ static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
     } else if (*pupper < old_upper)
       *pupper = old_upper;
 
-    if (plastiter != NULL)
+    if (plastiter != NULL) {
+      KMP_DEBUG_ASSERT(chunk != 0);
       *plastiter = (tid == ((trip_count - 1) / (UT)chunk));
+    }
     break;
   }
   default:
@@ -417,6 +424,7 @@ static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
     // Calculate chunk in case it was not specified; it is specified for
     // kmp_sch_static_chunked
     if (schedtype == kmp_sch_static) {
+      KMP_DEBUG_ASSERT(nth != 0);
       cur_chunk = trip_count / nth + ((trip_count % nth) ? 1 : 0);
     }
     // 0 - "static" schedule
@@ -547,6 +555,7 @@ static void __kmp_dist_for_static_init(ident_t *loc, kmp_int32 gtid,
     // upper-lower can exceed the limit of signed type
     trip_count = (UT)(*pupper - *plower) / incr + 1;
   } else {
+    KMP_DEBUG_ASSERT(incr != 0);
     trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
   }
 
@@ -568,6 +577,7 @@ static void __kmp_dist_for_static_init(ident_t *loc, kmp_int32 gtid,
       *plastiter = (tid == 0 && team_id == trip_count - 1);
   } else {
     // Get the team's chunk first (each team gets at most one chunk)
+    KMP_DEBUG_ASSERT(nteams != 0);
     if (__kmp_static == kmp_sch_static_balanced) {
       UT chunkD = trip_count / nteams;
       UT extras = trip_count % nteams;
@@ -619,6 +629,7 @@ static void __kmp_dist_for_static_init(ident_t *loc, kmp_int32 gtid,
       // upper-lower can exceed the limit of signed type
       trip_count = (UT)(*pupperDist - *plower) / incr + 1;
     } else {
+      KMP_DEBUG_ASSERT(incr != 0);
       trip_count = (UT)(*plower - *pupperDist) / (-incr) + 1;
     }
     KMP_DEBUG_ASSERT(trip_count);
@@ -637,6 +648,7 @@ static void __kmp_dist_for_static_init(ident_t *loc, kmp_int32 gtid,
           if (*plastiter != 0 && !(tid == trip_count - 1))
             *plastiter = 0;
       } else {
+        KMP_DEBUG_ASSERT(nth != 0);
         if (__kmp_static == kmp_sch_static_balanced) {
           UT chunkL = trip_count / nth;
           UT extras = trip_count % nth;
@@ -684,9 +696,11 @@ static void __kmp_dist_for_static_init(ident_t *loc, kmp_int32 gtid,
       *pstride = span * nth;
       *plower = *plower + (span * tid);
       *pupper = *plower + span - incr;
-      if (plastiter != NULL)
+      if (plastiter != NULL) {
+        KMP_DEBUG_ASSERT(chunk != 0);
         if (*plastiter != 0 && !(tid == ((trip_count - 1) / (UT)chunk) % nth))
           *plastiter = 0;
+      }
       break;
     }
     default:
@@ -809,6 +823,7 @@ static void __kmp_team_static_init(ident_t *loc, kmp_int32 gtid,
     // upper-lower can exceed the limit of signed type
     trip_count = (UT)(upper - lower) / incr + 1;
   } else {
+    KMP_DEBUG_ASSERT(incr != 0);
     trip_count = (UT)(lower - upper) / (-incr) + 1;
   }
   if (chunk < 1)
@@ -817,8 +832,10 @@ static void __kmp_team_static_init(ident_t *loc, kmp_int32 gtid,
   *p_st = span * nteams;
   *p_lb = lower + (span * team_id);
   *p_ub = *p_lb + span - incr;
-  if (p_last != NULL)
+  if (p_last != NULL) {
+    KMP_DEBUG_ASSERT(chunk != 0);
     *p_last = (team_id == ((trip_count - 1) / (UT)chunk) % nteams);
+  }
   // Correct upper bound if needed
   if (incr > 0) {
     if (*p_ub < *p_lb) // overflow?
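
The assertions added here are debug-build checks: KMP_DEBUG_ASSERT comes from the runtime's kmp_debug.h and, like a standard assert, is expected to compile away when debugging support is disabled, so release binaries still perform the divisions unguarded. A rough, hypothetical sketch of that kind of debug-gated macro (not the runtime's actual definition):

#include <cstdio>
#include <cstdlib>

#ifdef MY_DEBUG_BUILD // stand-in for the runtime's debug-build condition
#define MY_DEBUG_ASSERT(cond)                                                  \
  do {                                                                         \
    if (!(cond)) {                                                             \
      std::fprintf(stderr, "assertion failed: %s (%s:%d)\n", #cond, __FILE__,  \
                   __LINE__);                                                  \
      std::abort();                                                            \
    }                                                                          \
  } while (0)
#else
#define MY_DEBUG_ASSERT(cond) ((void)0) // no code emitted in release builds
#endif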
