OpenMP RTL cleanup: eliminated warnings with -Wcast-qual.
Changes are: replaced C-style casts with const_cast and reinterpret_cast
(through the CCAST and RCAST macros); the type of several counters changed
to signed; the type of the parameters of the 32-bit and 64-bit AND and OR
intrinsics changed to unsigned; the changed files were formatted with
clang-format version 3.8.1.

Differential Revision: https://reviews.llvm.org/D34759

llvm-svn: 307020
AndreyChurbanov committed Jul 3, 2017
1 parent 5c787ab commit c47afcd
Showing 22 changed files with 376 additions and 349 deletions.
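For context on the warning itself: -Wcast-qual fires when a cast silently removes a const or volatile qualifier from a pointee type. The CCAST/RCAST macros that appear throughout the diff below wrap the C++ named casts; the definitions here are simplified stand-ins for the real ones in kmp_os.h, and free_buffer is a hypothetical callee.

// Simplified stand-ins for the runtime's CCAST/RCAST helpers (see kmp_os.h).
#define CCAST(type, var) const_cast<type>(var)
#define RCAST(type, var) reinterpret_cast<type>(var)

static void free_buffer(void *p) { (void)p; } // hypothetical deallocator

void demo(volatile void *shared, const char *owned) {
  // C-style casts that strip qualifiers trigger -Wcast-qual:
  //   free_buffer((void *)shared); // warning: cast drops 'volatile'
  //   free_buffer((char *)owned);  // warning: cast drops 'const'
  // Named casts make the qualifier removal explicit and warning-free:
  free_buffer(CCAST(void *, shared));
  free_buffer(CCAST(char *, owned)); // char * converts to void * implicitly
}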
26 changes: 12 additions & 14 deletions openmp/runtime/src/kmp.h
@@ -788,8 +788,8 @@ typedef enum kmp_cancel_kind_t {

// KMP_HW_SUBSET support:
typedef struct kmp_hws_item {
- int num;
- int offset;
+ int num;
+ int offset;
} kmp_hws_item_t;

extern kmp_hws_item_t __kmp_hws_socket;
@@ -1533,9 +1533,9 @@ typedef struct KMP_ALIGN_CACHE dispatch_private_info32 {
kmp_uint32 ordered_lower;
kmp_uint32 ordered_upper;
#if KMP_OS_WINDOWS
- // This var can be placed in the hole between 'tc' and 'parm1', instead of
- // 'static_steal_counter'. It would be nice to measure execution times.
- // Conditional if/endif can be removed at all.
+ // This var can be placed in the hole between 'tc' and 'parm1', instead of
+ // 'static_steal_counter'. It would be nice to measure execution times.
+ // Conditional if/endif can be removed at all.
kmp_int32 last_upper;
#endif /* KMP_OS_WINDOWS */
} dispatch_private_info32_t;
@@ -1568,9 +1568,9 @@ typedef struct KMP_ALIGN_CACHE dispatch_private_info64 {
kmp_uint64 ordered_lower;
kmp_uint64 ordered_upper;
#if KMP_OS_WINDOWS
- // This var can be placed in the hole between 'tc' and 'parm1', instead of
- // 'static_steal_counter'. It would be nice to measure execution times.
- // Conditional if/endif can be removed at all.
+ // This var can be placed in the hole between 'tc' and 'parm1', instead of
+ // 'static_steal_counter'. It would be nice to measure execution times.
+ // Conditional if/endif can be removed at all.
kmp_int64 last_upper;
#endif /* KMP_OS_WINDOWS */
} dispatch_private_info64_t;
@@ -2109,7 +2109,7 @@ typedef struct kmp_task { /* GEH: Shouldn't this be aligned somehow? */

#if OMP_40_ENABLED
typedef struct kmp_taskgroup {
- kmp_uint32 count; // number of allocated and not yet complete tasks
+ kmp_int32 count; // number of allocated and not yet complete tasks
kmp_int32 cancel_request; // request for cancellation of this taskgroup
struct kmp_taskgroup *parent; // parent taskgroup
// TODO: change to OMP_50_ENABLED, need to change build tools for this to work
@@ -2250,10 +2250,10 @@ struct kmp_taskdata { /* aligned during dynamic allocation */
kmp_int32 td_taskwait_thread; /* gtid + 1 of thread encountered taskwait */
KMP_ALIGN_CACHE kmp_internal_control_t
td_icvs; /* Internal control variables for the task */
- KMP_ALIGN_CACHE volatile kmp_uint32
+ KMP_ALIGN_CACHE volatile kmp_int32
td_allocated_child_tasks; /* Child tasks (+ current task) not yet
deallocated */
- volatile kmp_uint32
+ volatile kmp_int32
td_incomplete_child_tasks; /* Child tasks not yet complete */
#if OMP_40_ENABLED
kmp_taskgroup_t
@@ -2328,7 +2328,7 @@ typedef struct kmp_base_task_team {
#endif

KMP_ALIGN_CACHE
- volatile kmp_uint32 tt_unfinished_threads; /* #threads still active */
+ volatile kmp_int32 tt_unfinished_threads; /* #threads still active */

KMP_ALIGN_CACHE
volatile kmp_uint32
@@ -2402,7 +2402,6 @@ typedef struct KMP_ALIGN_CACHE kmp_base_info {
kmp_uint64 th_team_bt_intervals;
#endif


#if KMP_AFFINITY_SUPPORTED
kmp_affin_mask_t *th_affin_mask; /* thread's current affinity mask */
#endif
@@ -3787,7 +3786,6 @@ extern int _You_must_link_with_Intel_OpenMP_library;
extern int _You_must_link_with_Microsoft_OpenMP_library;
#endif


// The routines below are not exported.
// Consider making them 'static' in corresponding source files.
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
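The counter retypings above (kmp_uint32 to kmp_int32 for the taskgroup and child-task counts) are the "type of several counters changed to signed" part of the commit message: the KMP_TEST_THEN_INC/DEC-style intrinsics operate on signed integers, so signed fields avoid pointer casts at every update site. A sketch of the counting pattern in portable C++11 atomics; the helper names are invented for illustration, not the runtime's API.

#include <atomic>
#include <cstdint>

// Mirrors td_incomplete_child_tasks: bumped when a child task is spawned,
// decremented when one completes.
std::atomic<std::int32_t> incomplete_child_tasks{0};

void on_child_spawned() { incomplete_child_tasks.fetch_add(1); }

bool on_child_completed() {
  // fetch_sub returns the previous value; seeing 1 means this thread
  // retired the last outstanding child.
  return incomplete_child_tasks.fetch_sub(1) == 1;
}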
155 changes: 79 additions & 76 deletions openmp/runtime/src/kmp_affinity.cpp

Large diffs are not rendered by default.

8 changes: 4 additions & 4 deletions openmp/runtime/src/kmp_affinity.h
@@ -618,8 +618,10 @@ class AddrUnsPair {
};

static int __kmp_affinity_cmp_Address_labels(const void *a, const void *b) {
- const Address *aa = (const Address *)&(((AddrUnsPair *)a)->first);
- const Address *bb = (const Address *)&(((AddrUnsPair *)b)->first);
+ const Address *aa =
+     (const Address *)&(((AddrUnsPair *)CCAST(void *, a))->first);
+ const Address *bb =
+     (const Address *)&(((AddrUnsPair *)CCAST(void *, b))->first);
unsigned depth = aa->depth;
unsigned i;
KMP_DEBUG_ASSERT(depth == bb->depth);
@@ -765,7 +767,6 @@ class hierarchy_info {
skipPerLevel[i] = 2 * skipPerLevel[i - 1];

uninitialized = initialized; // One writer

}

// Resize the hierarchy if nproc changes to something larger than before
@@ -832,7 +833,6 @@

base_num_threads = nproc;
resizing = 0; // One writer

}
};
#endif // KMP_AFFINITY_H
16 changes: 8 additions & 8 deletions openmp/runtime/src/kmp_alloc.cpp
@@ -298,12 +298,12 @@ static void __kmp_bget_dequeue(kmp_info_t *th) {
#if USE_CMP_XCHG_FOR_BGET
{
volatile void *old_value = TCR_SYNC_PTR(th->th.th_local.bget_list);
- while (!KMP_COMPARE_AND_STORE_PTR(&th->th.th_local.bget_list, old_value,
-                                   NULL)) {
+ while (!KMP_COMPARE_AND_STORE_PTR(&th->th.th_local.bget_list,
+                                   CCAST(void *, old_value), NULL)) {
KMP_CPU_PAUSE();
old_value = TCR_SYNC_PTR(th->th.th_local.bget_list);
}
- p = (void *)old_value;
+ p = CCAST(void *, old_value);
}
#else /* ! USE_CMP_XCHG_FOR_BGET */
#ifdef USE_QUEUING_LOCK_FOR_BGET
@@ -362,15 +362,15 @@ static void __kmp_bget_enqueue(kmp_info_t *th, void *buf
volatile void *old_value = TCR_PTR(th->th.th_local.bget_list);
/* the next pointer must be set before setting bget_list to buf to avoid
exposing a broken list to other threads, even for an instant. */
- b->ql.flink = BFH(old_value);
+ b->ql.flink = BFH(CCAST(void *, old_value));

- while (!KMP_COMPARE_AND_STORE_PTR(&th->th.th_local.bget_list, old_value,
-                                   buf)) {
+ while (!KMP_COMPARE_AND_STORE_PTR(&th->th.th_local.bget_list,
+                                   CCAST(void *, old_value), buf)) {
KMP_CPU_PAUSE();
old_value = TCR_PTR(th->th.th_local.bget_list);
/* the next pointer must be set before setting bget_list to buf to avoid
exposing a broken list to other threads, even for an instant. */
- b->ql.flink = BFH(old_value);
+ b->ql.flink = BFH(CCAST(void *, old_value));
}
}
#else /* ! USE_CMP_XCHG_FOR_BGET */
@@ -607,7 +607,7 @@ static void *bget(kmp_info_t *th, bufsize requested_size) {
if (thr->acqfcn != 0) {
if (size > (bufsize)(thr->exp_incr - sizeof(bhead_t))) {
/* Request is too large to fit in a single expansion block.
- Try to satisy it by a direct buffer acquisition. */
+ Try to satisy it by a direct buffer acquisition. */
bdhead_t *bdh;

size += sizeof(bdhead_t) - sizeof(bhead_t);
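The __kmp_bget_dequeue loop patched above is a lock-free pop-the-whole-list idiom: swap the shared head to NULL and retry while other threads keep pushing. A sketch of the same idiom under C++11 atomics; the type and function names are illustrative, not the runtime's.

#include <atomic>

struct buf_node { buf_node *flink; }; // stand-in for bget's free-list link

std::atomic<buf_node *> bget_list{nullptr};

// Detach the entire free list atomically; the caller then owns every node.
buf_node *dequeue_all() {
  buf_node *old_value = bget_list.load();
  // compare_exchange_weak reloads old_value on failure, mirroring the
  // re-read of bget_list in the runtime's KMP_COMPARE_AND_STORE_PTR loop.
  while (!bget_list.compare_exchange_weak(old_value, nullptr)) {
  }
  return old_value;
}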
12 changes: 6 additions & 6 deletions openmp/runtime/src/kmp_barrier.cpp
@@ -883,9 +883,9 @@ static void __kmp_hierarchical_barrier_gather(
ANNOTATE_REDUCE_BEFORE(reduce);
ANNOTATE_REDUCE_BEFORE(&team->t.t_bar);
}
- (void)KMP_TEST_THEN_AND64(
-     (volatile kmp_int64 *)&thr_bar->b_arrived,
-     ~(thr_bar->leaf_state)); // clear leaf_state bits
+ // clear leaf_state bits
+ KMP_TEST_THEN_AND64(CCAST(kmp_uint64 *, &thr_bar->b_arrived),
+                     ~(thr_bar->leaf_state));
}
// Next, wait for higher level children on each child's b_arrived flag
for (kmp_uint32 d = 1; d < thr_bar->my_level;
@@ -1035,7 +1035,8 @@ static void __kmp_hierarchical_barrier_release(
TCW_8(thr_bar->b_go,
KMP_INIT_BARRIER_STATE); // Reset my b_go flag for next time
} else { // Reset my bits on parent's b_go flag
- ((char *)&(thr_bar->parent_bar->b_go))[thr_bar->offset] = 0;
+ (RCAST(volatile char *,
+        &(thr_bar->parent_bar->b_go)))[thr_bar->offset] = 0;
}
}
thr_bar->wait_flag = KMP_BARRIER_NOT_WAITING;
@@ -1210,7 +1211,6 @@ static void __kmp_hierarchical_barrier_release(
gtid, team->t.t_id, tid, bt));
}


// End of Barrier Algorithms

// Internal function to do a barrier.
@@ -1347,7 +1347,7 @@ int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
if (KMP_MASTER_TID(tid)) {
status = 0;
if (__kmp_tasking_mode != tskm_immediate_exec) {
- __kmp_task_team_wait(this_thr, team USE_ITT_BUILD_ARG(itt_sync_obj));
+ __kmp_task_team_wait(this_thr, team USE_ITT_BUILD_ARG(itt_sync_obj));
}
#if USE_DEBUGGER
// Let the debugger know: All threads are arrived and starting leaving the
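The RCAST in the release path above keeps the volatile qualifier while reinterpreting the 64-bit b_go flag as bytes: each child owns one byte of its parent's word and can clear its own bits without a read-modify-write of the whole word. A reduced sketch; the function name and the offset range are assumptions for illustration.

#include <cstdint>

void clear_my_go_byte(volatile std::uint64_t *parent_b_go,
                      unsigned my_offset) { // assumed to be in [0, 7]
  // Reinterpret the word as bytes but keep it volatile; dropping the
  // qualifier with a C-style cast is exactly what -Wcast-qual flags.
  reinterpret_cast<volatile char *>(parent_b_go)[my_offset] = 0;
}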
10 changes: 5 additions & 5 deletions openmp/runtime/src/kmp_csupport.cpp
@@ -3092,8 +3092,8 @@ void __kmpc_doacross_init(ident_t *loc, int gtid, int num_dims,
// __kmp_dispatch_num_buffers)
if (idx != sh_buf->doacross_buf_idx) {
// Shared buffer is occupied, wait for it to be free
- __kmp_wait_yield_4((kmp_uint32 *)&sh_buf->doacross_buf_idx, idx, __kmp_eq_4,
-                    NULL);
+ __kmp_wait_yield_4((volatile kmp_uint32 *)&sh_buf->doacross_buf_idx, idx,
+                    __kmp_eq_4, NULL);
}
// Check if we are the first thread. After the CAS the first thread gets 0,
// others get 1 if initialization is in progress, allocated pointer otherwise.
@@ -3258,8 +3258,8 @@ void __kmpc_doacross_post(ident_t *loc, int gtid, long long *vec) {
iter_number >>= 5; // divided by 32
flag = 1 << shft;
if ((flag & pr_buf->th_doacross_flags[iter_number]) == 0)
- KMP_TEST_THEN_OR32((kmp_int32 *)&pr_buf->th_doacross_flags[iter_number],
-                    (kmp_int32)flag);
+ KMP_TEST_THEN_OR32(
+     CCAST(kmp_uint32 *, &pr_buf->th_doacross_flags[iter_number]), flag);
KA_TRACE(20, ("__kmpc_doacross_post() exit: T#%d iter %lld posted\n", gtid,
(iter_number << 5) + shft));
}
@@ -3285,7 +3285,7 @@ void __kmpc_doacross_fini(ident_t *loc, int gtid) {
(kmp_int64)&sh_buf->doacross_num_done);
KMP_DEBUG_ASSERT(num_done == (kmp_int64)sh_buf->doacross_num_done);
KMP_DEBUG_ASSERT(idx == sh_buf->doacross_buf_idx);
- __kmp_thread_free(th, (void *)sh_buf->doacross_flags);
+ __kmp_thread_free(th, CCAST(kmp_uint32 *, sh_buf->doacross_flags));
sh_buf->doacross_flags = NULL;
sh_buf->doacross_num_done = 0;
sh_buf->doacross_buf_idx +=
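Context for the OR change above, and for the commit message's note about unsigned intrinsic parameters: a posted doacross iteration is one bit in a kmp_uint32 flag word, so once KMP_TEST_THEN_OR32 takes unsigned operands the casts disappear. A sketch with C++11 atomics rather than the runtime's volatile flag array.

#include <atomic>
#include <cstdint>

void post_iteration(std::atomic<std::uint32_t> *flags, std::uint64_t iter) {
  std::uint32_t shft = iter % 32;  // bit index within one flag word
  std::uint64_t word = iter >> 5;  // iter / 32 selects the word
  std::uint32_t flag = 1u << shft;
  if ((flags[word].load() & flag) == 0) // skip the atomic RMW if already set
    flags[word].fetch_or(flag);
}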
36 changes: 20 additions & 16 deletions openmp/runtime/src/kmp_dispatch.cpp
@@ -172,15 +172,15 @@ template <>
__forceinline kmp_int32 test_then_add<kmp_int32>(volatile kmp_int32 *p,
kmp_int32 d) {
kmp_int32 r;
- r = KMP_TEST_THEN_ADD32(p, d);
+ r = KMP_TEST_THEN_ADD32(CCAST(kmp_int32 *, p), d);
return r;
}

template <>
__forceinline kmp_int64 test_then_add<kmp_int64>(volatile kmp_int64 *p,
kmp_int64 d) {
kmp_int64 r;
- r = KMP_TEST_THEN_ADD64(p, d);
+ r = KMP_TEST_THEN_ADD64(CCAST(kmp_int64 *, p), d);
return r;
}

@@ -190,14 +190,14 @@ template <typename T> static __forceinline T test_then_inc_acq(volatile T *p);
template <>
__forceinline kmp_int32 test_then_inc_acq<kmp_int32>(volatile kmp_int32 *p) {
kmp_int32 r;
- r = KMP_TEST_THEN_INC_ACQ32(p);
+ r = KMP_TEST_THEN_INC_ACQ32(CCAST(kmp_int32 *, p));
return r;
}

template <>
__forceinline kmp_int64 test_then_inc_acq<kmp_int64>(volatile kmp_int64 *p) {
kmp_int64 r;
- r = KMP_TEST_THEN_INC_ACQ64(p);
+ r = KMP_TEST_THEN_INC_ACQ64(CCAST(kmp_int64 *, p));
return r;
}

@@ -207,14 +207,14 @@ template <typename T> static __forceinline T test_then_inc(volatile T *p);
template <>
__forceinline kmp_int32 test_then_inc<kmp_int32>(volatile kmp_int32 *p) {
kmp_int32 r;
- r = KMP_TEST_THEN_INC32(p);
+ r = KMP_TEST_THEN_INC32(CCAST(kmp_int32 *, p));
return r;
}

template <>
__forceinline kmp_int64 test_then_inc<kmp_int64>(volatile kmp_int64 *p) {
kmp_int64 r;
- r = KMP_TEST_THEN_INC64(p);
+ r = KMP_TEST_THEN_INC64(CCAST(kmp_int64 *, p));
return r;
}

@@ -262,7 +262,7 @@ static UT // unsigned 4- or 8-byte type
register kmp_uint32 (*f)(UT, UT) = pred;
register UT r;

- KMP_FSYNC_SPIN_INIT(obj, (void *)spin);
+ KMP_FSYNC_SPIN_INIT(obj, CCAST(UT *, spin));
KMP_INIT_YIELD(spins);
// main wait spin loop
while (!f(r = *spin, check)) {
@@ -440,7 +440,7 @@ static void __kmp_dispatch_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref) {
th->th.th_dispatch->th_dispatch_pr_current);
}

- KMP_FSYNC_RELEASING(&sh->u.s.ordered_iteration);
+ KMP_FSYNC_RELEASING(CCAST(UT *, &sh->u.s.ordered_iteration));
#if !defined(KMP_GOMP_COMPAT)
if (__kmp_env_consistency_check) {
if (pr->ordered_bumped != 0) {
@@ -1162,7 +1162,9 @@ __kmp_dispatch_init(ident_t *loc, int gtid, enum sched_type schedule, T lb,
gtid, my_buffer_index, sh->buffer_index));

th->th.th_dispatch->th_dispatch_pr_current = (dispatch_private_info_t *)pr;
- th->th.th_dispatch->th_dispatch_sh_current = (dispatch_shared_info_t *)sh;
+ th->th.th_dispatch->th_dispatch_sh_current =
+     RCAST(dispatch_shared_info_t *,
+           CCAST(dispatch_shared_info_template<UT> *, sh));
#if USE_ITT_BUILD
if (pr->ordered) {
__kmp_itt_ordered_init(gtid);
@@ -1978,7 +1980,8 @@ static int __kmp_dispatch_next(ident_t *loc, int gtid, kmp_int32 *p_last,
pr->u.p.parm2) { // compare with K*nproc*(chunk+1), K=2 by default
// use dynamic-style shcedule
// atomically inrement iterations, get old value
- init = test_then_add<ST>((ST *)&sh->u.s.iteration, (ST)chunkspec);
+ init = test_then_add<ST>(
+     RCAST(ST *, CCAST(UT *, &sh->u.s.iteration)), (ST)chunkspec);
remaining = trip - init;
if (remaining <= 0) {
status = 0; // all iterations got by other threads
@@ -1995,8 +1998,8 @@ static int __kmp_dispatch_next(ident_t *loc, int gtid, kmp_int32 *p_last,
} // if
limit = init + (UT)(remaining *
*(double *)&pr->u.p.parm3); // divide by K*nproc
- if (compare_and_swap<ST>((ST *)&sh->u.s.iteration, (ST)init,
-                          (ST)limit)) {
+ if (compare_and_swap<ST>(RCAST(ST *, CCAST(UT *, &sh->u.s.iteration)),
+                          (ST)init, (ST)limit)) {
// CAS was successful, chunk obtained
status = 1;
--limit;
@@ -2056,7 +2059,8 @@ static int __kmp_dispatch_next(ident_t *loc, int gtid, kmp_int32 *p_last,
if ((T)remaining < pr->u.p.parm2) {
// use dynamic-style shcedule
// atomically inrement iterations, get old value
- init = test_then_add<ST>((ST *)&sh->u.s.iteration, (ST)chunk);
+ init = test_then_add<ST>(
+     RCAST(ST *, CCAST(UT *, &sh->u.s.iteration)), (ST)chunk);
remaining = trip - init;
if (remaining <= 0) {
status = 0; // all iterations got by other threads
@@ -2078,8 +2082,8 @@ static int __kmp_dispatch_next(ident_t *loc, int gtid, kmp_int32 *p_last,
if (rem) // adjust so that span%chunk == 0
span += chunk - rem;
limit = init + span;
- if (compare_and_swap<ST>((ST *)&sh->u.s.iteration, (ST)init,
-                          (ST)limit)) {
+ if (compare_and_swap<ST>(RCAST(ST *, CCAST(UT *, &sh->u.s.iteration)),
+                          (ST)init, (ST)limit)) {
// CAS was successful, chunk obtained
status = 1;
--limit;
@@ -2716,7 +2720,7 @@ __kmp_wait_yield_4(volatile kmp_uint32 *spinner, kmp_uint32 checker,
register kmp_uint32 (*f)(kmp_uint32, kmp_uint32) = pred;
register kmp_uint32 r;

- KMP_FSYNC_SPIN_INIT(obj, (void *)spin);
+ KMP_FSYNC_SPIN_INIT(obj, CCAST(kmp_uint32 *, spin));
KMP_INIT_YIELD(spins);
// main wait spin loop
while (!f(r = TCR_4(*spin), check)) {
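The test_then_add/test_then_inc specializations patched above share one shape: a generic volatile T * interface that forwards to a width-specific intrinsic, with CCAST shedding volatile because the intrinsic's parameter is a plain pointer. A reduced model; KMP_TEST_THEN_ADD32 is approximated here with a GCC/Clang __atomic builtin, not the real platform intrinsic.

#include <cstdint>

template <typename T> static inline T test_then_add(volatile T *p, T d);

template <>
inline std::int32_t test_then_add<std::int32_t>(volatile std::int32_t *p,
                                                std::int32_t d) {
  // __atomic_fetch_add accepts a volatile pointer directly, so this sketch
  // needs no cast; the runtime's intrinsic takes kmp_int32 *, hence the
  // CCAST(kmp_int32 *, p) in the diff above.
  return __atomic_fetch_add(p, d, __ATOMIC_ACQ_REL);
}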
7 changes: 4 additions & 3 deletions openmp/runtime/src/kmp_environment.cpp
@@ -147,7 +147,7 @@ char *__kmp_env_get(char const *name) {
void __kmp_env_free(char const **value) {

KMP_DEBUG_ASSERT(value != NULL);
- KMP_INTERNAL_FREE((void *)*value);
+ KMP_INTERNAL_FREE(CCAST(char *, *value));
*value = NULL;

} // func __kmp_env_free
@@ -475,7 +475,8 @@ void __kmp_env_blk_sort(
kmp_env_blk_t *block // M: Block of environment variables to sort.
) {

- qsort((void *)block->vars, block->count, sizeof(kmp_env_var_t),
+ qsort(CCAST(kmp_env_var_t *, block->vars), block->count,
+       sizeof(kmp_env_var_t),
(int (*)(void const *, void const *)) & ___kmp_env_var_cmp);

} // __kmp_env_block_sort
@@ -484,7 +485,7 @@ void __kmp_env_blk_free(
kmp_env_blk_t *block // M: Block of environment variables to free.
) {

- KMP_INTERNAL_FREE((void *)block->vars);
+ KMP_INTERNAL_FREE(CCAST(kmp_env_var_t *, block->vars));
__kmp_str_free(&(block->bulk));

block->count = 0;
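The two KMP_INTERNAL_FREE fixes above are the classic free-a-const-pointer case: the environment block stores char const * strings, but a deallocator takes a non-const pointer, so the const must be shed explicitly. A minimal sketch assuming KMP_INTERNAL_FREE behaves like free().

#include <cstdlib>

void env_value_free(char const **value) {
  std::free(const_cast<char *>(*value)); // CCAST(char *, *value) upstream
  *value = nullptr;
}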
