From 637396426fe6d01c059e3cdc3732bff8a42cc37d Mon Sep 17 00:00:00 2001 From: Ian Henriksen Date: Mon, 7 Oct 2024 09:51:34 -0600 Subject: [PATCH 1/5] Fix stackleft on nvc. --- src/qthread.c | 2 +- test/basics/qthread_stackleft.c | 13 ++++++++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/src/qthread.c b/src/qthread.c index 899966143..6a6040ae9 100644 --- a/src/qthread.c +++ b/src/qthread.c @@ -1294,7 +1294,7 @@ size_t API_FUNC qthread_stackleft(void) { /*{{{ */ qthread_t const *f = qthread_internal_self(); if ((f != NULL) && (f->rdata->stack != NULL)) { -#ifdef __INTEL_COMPILER +#if defined(__INTEL_COMPILER) size_t current = (size_t)&f; #else size_t current = (size_t)__builtin_frame_address(0); diff --git a/test/basics/qthread_stackleft.c b/test/basics/qthread_stackleft.c index 667ab3897..db3ac9c86 100644 --- a/test/basics/qthread_stackleft.c +++ b/test/basics/qthread_stackleft.c @@ -11,9 +11,14 @@ static aligned_t x = 0; static aligned_t alldone; +// The structure of this test is fairly trivial so we have to use +// various compiler-specific stuff to prevent TCO from kicking in +// and preventing us from testing that the stack behaves as expected. #ifdef __clang__ #define STACKLEFT_NOINLINE __attribute__((optnone)) -#elif __GNUC__ +#elif defined(__NVCOMPILER) +#define STACKLEFT_NOINLINE __attribute__((noinline)) +#elif defined(__GNUC__) #define STACKLEFT_NOINLINE __attribute__((optimize(0))) #else #define STACKLEFT_NOINLINE @@ -36,6 +41,12 @@ static aligned_t alldone; static STACKLEFT_NOINLINE size_t thread2(size_t left, size_t depth) { size_t foo = qthread_stackleft(); + +#if defined(__NVCOMPILER) + // nvc doesn't currently support enough attributes/pragmas to prevent TCO + // here. This still works though. + asm volatile("" : : "g"(&depth) : "memory"); +#endif iprintf("leveli%i: %zu bytes left\n", (int)depth, foo); test_check(foo < left); if (depth < 5) { thread2(foo, depth + 1); } From 845ad01a1ec017cf0a8fb0e7b9eaff7874a744d5 Mon Sep 17 00:00:00 2001 From: Ian Henriksen Date: Thu, 3 Oct 2024 15:20:36 -0600 Subject: [PATCH 2/5] Add nvc builds on amd64 and ARM. 
--- .circleci/config.yml | 45 ++++++++++++++++++++++++++++++++++++++++++++ configure.ac | 2 +- 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index b528e9c72..854886f22 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -137,6 +137,45 @@ jobs: timeout --foreground -k 10s 4m make check no_output_timeout: 60s + nvc: + parameters: + worker_type: + type: string + scheduler: + type: string + topology: + type: string + machine: + image: ubuntu-2204:2024.04.4 + resource_class: << parameters.worker_type >> + environment: + CC: nvc + CXX: nvc++ + steps: + - checkout + - run: | + curl https://developer.download.nvidia.com/hpc-sdk/ubuntu/DEB-GPG-KEY-NVIDIA-HPC-SDK | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg + export MACHINE_TYPE=`uname -m` + if [ ${MACHINE_TYPE} == 'x86_64' ]; then echo 'deb [signed-by=/usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg] https://developer.download.nvidia.com/hpc-sdk/ubuntu/amd64 /' | sudo tee /etc/apt/sources.list.d/nvhpc.list; fi + if [ ${MACHINE_TYPE} == 'aarch64' ]; then echo 'deb [signed-by=/usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg] https://developer.download.nvidia.com/hpc-sdk/ubuntu/arm64 /' | sudo tee /etc/apt/sources.list.d/nvhpc.list; fi + sudo apt-get update -y + sudo apt-get install -y nvhpc-24-9 + sudo apt-get install -y autoconf automake libtool + sudo apt-get install -y hwloc libhwloc-dev + - run: | + export MACHINE_TYPE=`uname -m` + if [ ${MACHINE_TYPE} == 'x86_64' ]; then export PATH="$PATH:/opt/nvidia/hpc_sdk/Linux_x86_64/24.9/compilers/bin"; fi + if [ ${MACHINE_TYPE} == 'aarch64' ]; then export PATH="$PATH:/opt/nvidia/hpc_sdk/Linux_aarch64/24.9/compilers/bin"; fi + nvc --version + ./autogen.sh + ./configure --enable-picky --disable-fastcontext --with-scheduler=<< parameters.scheduler >> -with-topology=<< parameters.topology >> + make -j2 + make tests -j2 + - run: + command: | + timeout --foreground -k 10s 4m make check + no_output_timeout: 60s + musl: parameters: worker_type: @@ -185,6 +224,12 @@ workflows: parameters: scheduler: [nemesis, sherwood, distrib] topology: ['no', binders, hwloc] + - nvc: + matrix: + parameters: + worker_type: [medium, arm.medium] + scheduler: [nemesis, sherwood, distrib] + topology: ['no', binders, hwloc] - musl: matrix: parameters: diff --git a/configure.ac b/configure.ac index d5332724e..d77276dc8 100644 --- a/configure.ac +++ b/configure.ac @@ -19,7 +19,7 @@ AC_CONFIG_MACRO_DIR([config]) AC_CONFIG_SRCDIR([src/qthread.c]) AM_INIT_AUTOMAKE([foreign subdir-objects dist-bzip2 no-define 1.16 ]) -AM_SILENT_RULES([yes]) +##AM_SILENT_RULES([yes]) # clean some extra things... CLEANFILES="*~ .\#* .gdb_history" From 4cb20f94a5ef7d128c902c5bd85d83ac9e9486e7 Mon Sep 17 00:00:00 2001 From: Ian Henriksen Date: Mon, 7 Oct 2024 13:42:20 -0600 Subject: [PATCH 3/5] Fix various warnings from the nvc compiler. 
--- include/qt_alloc.h | 2 +- include/qt_int_log.h | 7 +- src/affinity/hwloc.c | 7 +- src/barrier/feb.c | 4 +- src/ds/dictionary/dictionary_shavit.c | 2 +- src/ds/qarray.c | 12 +- src/qloop.c | 4 +- src/qthread.c | 4 +- src/queue.c | 4 +- src/shepherds.c | 2 +- src/sincs/donecount.c | 2 +- src/syncvar.c | 183 ++++++++++++------------ src/teams.c | 6 +- src/threadqueues/nemesis_threadqueues.c | 8 +- src/touch.c | 16 +-- test/basics/qtimer.c | 2 +- test/features/allpairs.c | 4 +- test/features/subteams.c | 2 +- test/stress/feb_stream.c | 2 +- test/stress/syncvar_stream.c | 6 +- 20 files changed, 136 insertions(+), 143 deletions(-) diff --git a/include/qt_alloc.h b/include/qt_alloc.h index c25633daf..712f567bb 100644 --- a/include/qt_alloc.h +++ b/include/qt_alloc.h @@ -27,6 +27,6 @@ void INTERNAL qt_internal_alignment_init(void); #pragma warning(disable : 191) #endif extern size_t _pagesize; -#define pagesize ((const size_t)_pagesize) +#define pagesize ((size_t)_pagesize) #endif // ifndef HAVE_QT_ALIGNED_ALLOC_H /* vim:set expandtab: */ diff --git a/include/qt_int_log.h b/include/qt_int_log.h index c65802a8a..73de529e0 100644 --- a/include/qt_int_log.h +++ b/include/qt_int_log.h @@ -6,9 +6,10 @@ static char const LogTable256[256] = { #define LT(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n - -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, - 3, 3, 3, 3, 3, LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), - LT(6), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)}; + (char)-1, 0, 1, 1, 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 3, 3, + LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6), LT(7), + LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)}; static inline uint32_t QT_INT_LOG(uint32_t v) { uint32_t r; diff --git a/src/affinity/hwloc.c b/src/affinity/hwloc.c index 537fb87de..c29f42ce7 100644 --- a/src/affinity/hwloc.c +++ b/src/affinity/hwloc.c @@ -233,13 +233,14 @@ void INTERNAL qt_affinity_init(qthread_shepherd_id_t *nbshepherds, /* first, look for an exact match in width and find the proper depth */ unsigned int maxdepth = hwloc_topology_get_depth(topology); unsigned int realdepth; - unsigned int fl_depth = -1; // first depth with #objs larger than nbsheps + unsigned int fl_depth = + (unsigned int)-1; // first depth with #objs larger than nbsheps for (realdepth = 0; realdepth < maxdepth && shep_depth == -1; ++realdepth) { unsigned int num = num_usable_by_depth(realdepth); if (num == *nbshepherds) { shep_depth = realdepth; - } else if ((num > *nbshepherds) && (fl_depth == -1)) { + } else if ((num > *nbshepherds) && (fl_depth == (unsigned int)-1)) { fl_depth = realdepth; } } @@ -247,7 +248,7 @@ void INTERNAL qt_affinity_init(qthread_shepherd_id_t *nbshepherds, /* should we use the last _smaller_, or the first _larger_ ? 
*/ /* first option means overlapping but we can use all the cores */ /* second option means no overlapping, but cores will go unused */ - if ((shep_depth == -1) && (fl_depth != -1)) { + if ((shep_depth == -1) && (fl_depth != (unsigned int)-1)) { /* first larger then */ shep_depth = fl_depth; } diff --git a/src/barrier/feb.c b/src/barrier/feb.c index 43cf4ce29..480f727ad 100644 --- a/src/barrier/feb.c +++ b/src/barrier/feb.c @@ -91,7 +91,7 @@ void API_FUNC qt_barrier_enter(qt_barrier_t *b) { qthread_readFF(NULL, &b->out_gate); } /* I'm on the way out, so decrement the blocker count */ - waiters = qthread_incr(&b->blockers, -1) - 1; + waiters = qthread_incr(&b->blockers, (aligned_t)-1) - 1u; if (waiters == 0) { /* last guy out of the barrier, close the out_gate, open the in_gate */ qthread_empty(&b->out_gate); @@ -119,7 +119,7 @@ void qt_global_barrier(void) { void qt_global_barrier_init(size_t size, int debug) { if (global_barrier == NULL) { - global_barrier = qt_barrier_create(size, 0); + global_barrier = qt_barrier_create(size, REGION_BARRIER); assert(global_barrier); } } diff --git a/src/ds/dictionary/dictionary_shavit.c b/src/ds/dictionary/dictionary_shavit.c index 34adec208..31200355a 100644 --- a/src/ds/dictionary/dictionary_shavit.c +++ b/src/ds/dictionary/dictionary_shavit.c @@ -386,7 +386,7 @@ static inline int qt_hash_remove(qt_hash h, qt_key_t const key) { h->op_cleanup)) { return 0; } - qthread_incr(&h->count, -1); + qthread_incr(&h->count, (aligned_t)-1); return 1; } diff --git a/src/ds/qarray.c b/src/ds/qarray.c index ff6b138f3..dd8425a9d 100644 --- a/src/ds/qarray.c +++ b/src/ds/qarray.c @@ -422,7 +422,7 @@ void qarray_destroy(qarray *a) { /*{{{ */ qthread_incr( &chunk_distribution_tracker[qarray_internal_segment_shep_read( a, segmenthead)], - -1); + (aligned_t)-1); } break; } @@ -435,14 +435,14 @@ void qarray_destroy(qarray *a) { /*{{{ */ for (segment = 0; segment < segment_count; segment++) { qthread_incr(&chunk_distribution_tracker[qarray_internal_shepof_segidx( a, segment)], - -1); + (aligned_t)-1); } break; } case ALL_SAME: qthread_incr(&chunk_distribution_tracker[a->dist_specific.dist_shep], - -1 * (a->count / a->segment_size + - ((a->count % a->segment_size) ? 1 : 0))); + ((aligned_t)-1) * (a->count / a->segment_size + + ((a->count % a->segment_size) ? 
1 : 0))); break; } #ifdef QTHREAD_HAVE_MEM_AFFINITY @@ -1202,7 +1202,7 @@ void qarray_set_shepof(qarray *a, #endif /* ifdef QTHREAD_HAVE_MEM_AFFINITY */ qthread_incr(&chunk_distribution_tracker[shep], segment_count); qthread_incr(&chunk_distribution_tracker[a->dist_specific.dist_shep], - -1 * segment_count); + ((aligned_t)-1) * segment_count); a->dist_specific.dist_shep = shep; } return; @@ -1223,7 +1223,7 @@ void qarray_set_shepof(qarray *a, } #endif /* ifdef QTHREAD_HAVE_MEM_AFFINITY */ qthread_incr(&chunk_distribution_tracker[shep], 1); - qthread_incr(&chunk_distribution_tracker[cur_shep], -1); + qthread_incr(&chunk_distribution_tracker[cur_shep], (aligned_t)-1); qarray_internal_segment_shep_write(a, seghead, shep); } } diff --git a/src/qloop.c b/src/qloop.c index a0a718043..332031afb 100644 --- a/src/qloop.c +++ b/src/qloop.c @@ -1017,7 +1017,7 @@ static aligned_t qqloop_wrapper(void *arg_void) { /*{{{*/ if (!qthread_shep_ok()) { /* my shepherd has been disabled while I was running */ safeexit = 0; - qthread_incr(&(stat->activesheps), -1); + qthread_incr(&(stat->activesheps), (aligned_t)-1); break; } } while (get_iters(iq, stat, &range)); @@ -1131,7 +1131,7 @@ void API_FUNC qt_loop_queue_addworker( if (loop->stat.donecount == 0) { qthread_fork_to((qthread_f)qqloop_wrapper, loop->qwa + shep, NULL, shep); } else { - qthread_incr(&(loop->stat.activesheps), -1); + qthread_incr(&(loop->stat.activesheps), (aligned_t)-1); } } /*}}}*/ diff --git a/src/qthread.c b/src/qthread.c index 6a6040ae9..c8e9c0423 100644 --- a/src/qthread.c +++ b/src/qthread.c @@ -694,7 +694,7 @@ int API_FUNC qthread_initialize(void) { /*{{{ */ /* initialize the shepherds as having no affinity */ for (i = 0; i < nshepherds; i++) { - qlib->shepherds[i].node = -1; + qlib->shepherds[i].node = UINT32_MAX; qlib->shepherds[i].shep_dists = NULL; qlib->shepherds[i].sorted_sheplist = NULL; qlib->shepherds[i].workers = @@ -1287,7 +1287,7 @@ API_FUNC void *qthread_tos(void) { API_FUNC void *qthread_bos(void) { qthread_t const *f = qthread_internal_self(); - return f->rdata->stack + qlib->qthread_stack_size; + return (void *)((char *)f->rdata->stack + qlib->qthread_stack_size); } size_t API_FUNC qthread_stackleft(void) { /*{{{ */ diff --git a/src/queue.c b/src/queue.c index f69069691..a074b423f 100644 --- a/src/queue.c +++ b/src/queue.c @@ -110,7 +110,7 @@ int API_FUNC qthread_queue_release_one(qthread_queue_t q) { break; case NEMESIS_LENGTH: t = qthread_queue_internal_NEMESIS_dequeue(&q->q.nemesis); - qthread_incr(&q->q.nemesis.length, -1); + qthread_incr(&q->q.nemesis.length, (aligned_t)-1); break; case CAPPED: t = qthread_queue_internal_capped_dequeue(&q->q.capped); break; default: QTHREAD_TRAP(); @@ -284,7 +284,7 @@ void INTERNAL qthread_queue_internal_capped_enqueue(qthread_queue_capped_t *q, offset = qthread_incr(&q->membercount, 1); qassert_retvoid(offset >= q->maxmembers); q->members[offset] = t; - qthread_incr(&q->busy, -1); + qthread_incr(&q->busy, (aligned_t)-1); } qthread_t INTERNAL * diff --git a/src/shepherds.c b/src/shepherds.c index 7e66a129a..dae4adec7 100644 --- a/src/shepherds.c +++ b/src/shepherds.c @@ -159,7 +159,7 @@ qthread_disable_shepherd(qthread_shepherd_id_t const shep) { /*{{{ */ return QTHREAD_NOT_ALLOWED; } qthread_internal_incr( - &(qlib->nshepherds_active), &(qlib->nshepherds_active_lock), -1); + &(qlib->nshepherds_active), &(qlib->nshepherds_active_lock), (aligned_t)-1); (void)QT_CAS(qlib->shepherds[shep].active, 1, 0); return QTHREAD_SUCCESS; } /*}}} */ diff --git a/src/sincs/donecount.c 
b/src/sincs/donecount.c index 6462bb4f2..b1cf48ce3 100644 --- a/src/sincs/donecount.c +++ b/src/sincs/donecount.c @@ -292,7 +292,7 @@ void API_FUNC qt_sinc_submit(qt_sinc_t *restrict sinc_, } // Update counter - qt_sinc_count_t count = qthread_incr(&sinc->counter, -1); + qt_sinc_count_t count = qthread_incr(&sinc->counter, (aligned_t)-1); assert(count > 0); if (1 == count) { // This is the final submit qt_sinc_internal_collate(sinc_); diff --git a/src/syncvar.c b/src/syncvar.c index 30310f3c6..f4c1b6be7 100644 --- a/src/syncvar.c +++ b/src/syncvar.c @@ -142,8 +142,8 @@ static uint64_t qthread_mwaitc(syncvar_t *restrict const addr, assert(err != NULL); e = *err; - e.zf = 0; - e.cf = 1; + e.zf = 0u; + e.cf = 1u; do { #if (QTHREAD_ASSEMBLY_ARCH == QTHREAD_POWERPC32) /* This applies for any 32-bit architecture with a valid 32-bit CAS @@ -194,10 +194,10 @@ static uint64_t qthread_mwaitc(syncvar_t *restrict const addr, ***************************************************/ if (statemask & (1 << locked.u.s.state)) { /* this is a state of interest, so fill the err struct */ - e.cf = 0; - e.sf = (unsigned char)(locked.u.s.state & 1); - e.pf = (unsigned char)((locked.u.s.state >> 1) & 1); - e.of = (unsigned char)((locked.u.s.state >> 2) & 1); + e.cf = 0u; + e.sf = (unsigned char)(locked.u.s.state & 1u); + e.pf = (unsigned char)((locked.u.s.state >> 1u) & 1u); + e.of = (unsigned char)((locked.u.s.state >> 2u) & 1u); *err = e; return locked.u.s.data; } else { @@ -234,7 +234,7 @@ void INTERNAL qt_syncvar_subsystem_init(uint_fast8_t need_sync) { } int qthread_syncvar_status(syncvar_t *const v) { /*{{{ */ - eflags_t e = {0, 0, 0, 0, 0}; + eflags_t e = {0u, 0u, 0u, 0u, 0u}; unsigned int realret; /* If I've done an atomic read @@ -249,7 +249,7 @@ int qthread_syncvar_status(syncvar_t *const v) { /*{{{ */ return (local_copy_of_v.u.s.state & 0x2) ? 0 : 1; } (void)qthread_mwaitc(v, 0xff, INT_MAX, &e); - qassert_ret(e.cf == 0, + qassert_ret(e.cf == 0u, QTHREAD_TIMEOUT); /* there better not have been a timeout */ local_copy_of_v.u.w = atomic_load_explicit((_Atomic uint64_t *)v, memory_order_relaxed); @@ -354,7 +354,7 @@ qthread_syncvar_blocker_func(void *dest, void *src, blocker_type t) { /*{{{*/ int API_FUNC qthread_syncvar_readFF(uint64_t *restrict dest, syncvar_t *restrict src) { /*{{{ */ assert(qthread_library_initialized); - eflags_t e = {0, 0, 0, 0, 0}; + eflags_t e = {0u, 0u, 0u, 0u, 0u}; uint64_t ret; qthread_t *me = qthread_internal_self(); assert(src); @@ -392,9 +392,9 @@ int API_FUNC qthread_syncvar_readFF(uint64_t *restrict dest, qthread_addrres_t *X; ret = qthread_mwaitc(src, SYNCFEB_ANY, INT_MAX, &e); - qassert_ret(e.cf == 0, + qassert_ret(e.cf == 0u, QTHREAD_TIMEOUT); /* there better not have been a timeout */ - if (e.pf == 0) { /* it got full! */ + if (e.pf == 0u) { /* it got full! 
*/ goto locked_full; } QTHREAD_COUNT_THREADS_BINCOUNTER(febs, lockbin); @@ -463,7 +463,7 @@ int API_FUNC qthread_syncvar_readFF(uint64_t *restrict dest, int API_FUNC qthread_syncvar_readFF_nb(uint64_t *restrict dest, syncvar_t *restrict src) { /*{{{ */ - eflags_t e = {0, 0, 0, 0, 0}; + eflags_t e = {0u, 0u, 0u, 0u, 0u}; uint64_t ret; qthread_t *me = qthread_internal_self(); @@ -498,7 +498,7 @@ int API_FUNC qthread_syncvar_readFF_nb(uint64_t *restrict dest, return QTHREAD_OPFAIL; } else { /* at this point, the syncvar is locked and e.pf should be 0 */ - assert(e.pf == 0); + assert(e.pf == 0u); UNLOCK_THIS_MODIFIED_SYNCVAR(src, ret, e.sf); if (dest) { *dest = ret; } } @@ -507,7 +507,7 @@ int API_FUNC qthread_syncvar_readFF_nb(uint64_t *restrict dest, int API_FUNC qthread_syncvar_fill(syncvar_t *restrict addr) { /*{{{ */ assert(qthread_library_initialized); - eflags_t e = {0, 0, 0, 0, 0}; + eflags_t e = {0u, 0u, 0u, 0u, 0u}; uint64_t ret; qthread_shepherd_t *shep = qthread_internal_getshep(); @@ -515,16 +515,16 @@ int API_FUNC qthread_syncvar_fill(syncvar_t *restrict addr) { /*{{{ */ if (!shep) { return qthread_syncvar_nonblocker_func(addr, NULL, FILL); } ret = qthread_mwaitc(addr, SYNCFEB_ANY, INT_MAX, &e); - qassert_ret(e.cf == 0, + qassert_ret(e.cf == 0u, QTHREAD_TIMEOUT); /* there better not have been a timeout */ - if (e.pf == 1) { /* currently empty, so it needs to change state */ - if (e.sf == 1) { /* waiters! */ + if (e.pf == 1u) { /* currently empty, so it needs to change state */ + if (e.sf == 1u) { /* waiters! */ int const lockbin = QTHREAD_CHOOSE_STRIPE(addr); qthread_addrstat_t *m; QTHREAD_COUNT_THREADS_BINCOUNTER(febs, lockbin); - e.sf = 0; // I'm releasing waiters - e.pf = 0; // I'm going to mark this as full + e.sf = 0u; // I'm releasing waiters + e.pf = 0u; // I'm going to mark this as full #ifdef LOCK_FREE_FEBS do { m = (qthread_addrstat_t *)qt_hash_get(syncvars[lockbin], (void *)addr); @@ -558,9 +558,9 @@ int API_FUNC qthread_syncvar_fill(syncvar_t *restrict addr) { /*{{{ */ QTHREAD_FASTLOCK_LOCK(&(m->lock)); #endif /* ifdef LOCK_FREE_FEBS */ if (m->FEQ) { - e.pf = 1; // back to being empty + e.pf = 1u; // back to being empty if (m->FEQ->next) { - e.sf = 1; // only one will be dequeued, so it'll still have waiters + e.sf = 1u; // only one will be dequeued, so it'll still have waiters } } UNLOCK_THIS_MODIFIED_SYNCVAR(addr, ret, (e.pf << 1) | e.sf); @@ -568,11 +568,11 @@ int API_FUNC qthread_syncvar_fill(syncvar_t *restrict addr) { /*{{{ */ assert(m->EFQ == NULL); // someone snuck in! 
qthread_syncvar_gotlock_fill(shep, m, addr, ret); } else { - assert(e.sf == 0); /* no waiters */ + assert(e.sf == 0u); /* no waiters */ UNLOCK_THIS_MODIFIED_SYNCVAR(addr, ret, 0); } } else { /* already full, so just release the lock */ - assert(e.pf == 0); + assert(e.pf == 0u); UNLOCK_THIS_MODIFIED_SYNCVAR(addr, ret, e.sf); } return QTHREAD_SUCCESS; @@ -580,7 +580,7 @@ int API_FUNC qthread_syncvar_fill(syncvar_t *restrict addr) { /*{{{ */ int API_FUNC qthread_syncvar_empty(syncvar_t *restrict addr) { /*{{{ */ assert(qthread_library_initialized); - eflags_t e = {0, 0, 0, 0, 0}; + eflags_t e = {0u, 0u, 0u, 0u, 0u}; uint64_t ret; qthread_shepherd_t *shep = qthread_internal_getshep(); @@ -588,16 +588,16 @@ int API_FUNC qthread_syncvar_empty(syncvar_t *restrict addr) { /*{{{ */ if (!shep) { return qthread_syncvar_nonblocker_func(addr, NULL, EMPTY); } ret = qthread_mwaitc(addr, SYNCFEB_ANY, INT_MAX, &e); - qassert_ret(e.cf == 0, + qassert_ret(e.cf == 0u, QTHREAD_TIMEOUT); /* there better not have been a timeout */ - if (e.pf == 0) { /* currently full, so it needs to change state */ - if (e.sf == 1) { /* waiters! */ + if (e.pf == 0u) { /* currently full, so it needs to change state */ + if (e.sf == 1u) { /* waiters! */ int const lockbin = QTHREAD_CHOOSE_STRIPE(addr); qthread_addrstat_t *m; QTHREAD_COUNT_THREADS_BINCOUNTER(febs, lockbin); - e.sf = 0; // released! - // wanted to mark it empty, but the waiters will fill it + e.sf = 0u; // released! + // wanted to mark it empty, but the waiters will fill it #ifdef LOCK_FREE_FEBS do { m = (qthread_addrstat_t *)qt_hash_get(syncvars[lockbin], (void *)addr); @@ -632,16 +632,16 @@ int API_FUNC qthread_syncvar_empty(syncvar_t *restrict addr) { /*{{{ */ #endif /* ifdef LOCK_FREE_FEBS */ assert(m->EFQ); // otherwise there weren't really any waiters assert(m->FFQ == NULL && m->FEQ == NULL); // someone snuck in! - if (m->EFQ->next) { e.sf = 1; } + if (m->EFQ->next) { e.sf = 1u; } // addr->u.w = BUILD_UNLOCKED_SYNCVAR(ret, e.sf); // this must be done by // gotlock_empty so we know what value to write qthread_syncvar_gotlock_empty(shep, m, addr, e.sf); } else { - assert(e.sf == 0); /* no waiters */ + assert(e.sf == 0u); /* no waiters */ UNLOCK_THIS_MODIFIED_SYNCVAR(addr, ret, SYNCFEB_STATE_EMPTY_NO_WAITERS); } } else { /* already empty, so just release the lock */ - assert(e.pf == 1); + assert(e.pf == 1u); UNLOCK_THIS_MODIFIED_SYNCVAR( addr, ret, SYNCFEB_STATE_EMPTY_NO_WAITERS | e.sf); } @@ -651,7 +651,7 @@ int API_FUNC qthread_syncvar_empty(syncvar_t *restrict addr) { /*{{{ */ int API_FUNC qthread_syncvar_readFE(uint64_t *restrict dest, syncvar_t *restrict src) { /*{{{ */ assert(qthread_library_initialized); - eflags_t e = {0, 0, 0, 0, 0}; + eflags_t e = {0u, 0u, 0u, 0u, 0u}; uint64_t ret; int const lockbin = QTHREAD_CHOOSE_STRIPE(src); qthread_t *me = qthread_internal_self(); @@ -671,10 +671,10 @@ int API_FUNC qthread_syncvar_readFE(uint64_t *restrict dest, qthread_addrres_t *X; ret = qthread_mwaitc(src, SYNCFEB_ANY, INT_MAX, &e); - qassert_ret(e.cf == 0, + qassert_ret(e.cf == 0u, QTHREAD_TIMEOUT); /* there better not have been a timeout */ - if (e.pf == 0) { /* it got full! */ - if (e.sf == 1) { /* it got full with waiters! */ + if (e.pf == 0u) { /* it got full! */ + if (e.sf == 1u) { /* it got full with waiters! 
*/ goto locked_full_waiters; } else { goto locked_full; @@ -738,12 +738,12 @@ int API_FUNC qthread_syncvar_readFE(uint64_t *restrict dest, &me->thread_state, QTHREAD_STATE_FEB_BLOCKED, memory_order_relaxed); me->rdata->blockedon.addr = m; qthread_back_to_master(me); - } else if (e.sf == 1) { /* waiters! */ + } else if (e.sf == 1u) { /* waiters! */ qthread_addrstat_t *m; locked_full_waiters: - assert(e.pf == 0); // otherwise we should have gotten a timeout - e.sf = 0; // released! + assert(e.pf == 0u); // otherwise we should have gotten a timeout + e.sf = 0u; // released! // wanted to mark it empty (pf=1), but the waiters will fill it #ifdef LOCK_FREE_FEBS do { @@ -780,14 +780,14 @@ int API_FUNC qthread_syncvar_readFE(uint64_t *restrict dest, assert(m->EFQ); // otherwise there weren't really any waiters assert(m->FFQ == NULL && m->FEQ == NULL); // someone snuck in! if (m->EFQ->next) { // there will be a waiter still waiting - e.sf = 1; + e.sf = 1u; } // src->u.w = BUILD_UNLOCKED_SYNCVAR(ret, e.sf); // this must be done by // gotlock_empty so we know what value to write qthread_syncvar_gotlock_empty(me->rdata->shepherd_ptr, m, src, e.sf); } else { locked_full: - assert(e.pf == 0); // otherwise this isn't really full + assert(e.pf == 0u); // otherwise this isn't really full UNLOCK_THIS_MODIFIED_SYNCVAR(src, ret, SYNCFEB_STATE_EMPTY_NO_WAITERS); } if (dest) { *dest = ret; } @@ -796,7 +796,7 @@ int API_FUNC qthread_syncvar_readFE(uint64_t *restrict dest, int API_FUNC qthread_syncvar_readFE_nb(uint64_t *restrict dest, syncvar_t *restrict src) { /*{{{ */ - eflags_t e = {0, 0, 0, 0, 0}; + eflags_t e = {0u, 0u, 0u, 0u, 0u}; uint64_t ret; int const lockbin = QTHREAD_CHOOSE_STRIPE(src); qthread_t *me = qthread_internal_self(); @@ -813,11 +813,11 @@ int API_FUNC qthread_syncvar_readFE_nb(uint64_t *restrict dest, ret = qthread_mwaitc(src, SYNCFEB_FULL, 1, &e); if (e.cf) { /* there was a timeout */ return QTHREAD_OPFAIL; - } else if (e.sf == 1) { /* waiters! */ + } else if (e.sf == 1u) { /* waiters! */ qthread_addrstat_t *m; - assert(e.pf == 0); // otherwise we should have gotten a timeout - e.sf = 0; // released! + assert(e.pf == 0u); // otherwise we should have gotten a timeout + e.sf = 0u; // released! QTHREAD_COUNT_THREADS_BINCOUNTER(febs, lockbin); // wanted to mark it empty (pf=1), but the waiters will fill it #ifdef LOCK_FREE_FEBS @@ -855,13 +855,13 @@ int API_FUNC qthread_syncvar_readFE_nb(uint64_t *restrict dest, assert(m->EFQ); // otherwise there weren't really any waiters assert(m->FFQ == NULL && m->FEQ == NULL); // someone snuck in! 
if (m->EFQ->next) { // there will be a waiter still waiting - e.sf = 1; + e.sf = 1u; } // src->u.w = BUILD_UNLOCKED_SYNCVAR(ret, e.sf); // this must be done by // gotlock_empty so we know what value to write qthread_syncvar_gotlock_empty(me->rdata->shepherd_ptr, m, src, e.sf); } else { - assert(e.pf == 0); // otherwise this isn't really full + assert(e.pf == 0u); // otherwise this isn't really full UNLOCK_THIS_MODIFIED_SYNCVAR(src, ret, SYNCFEB_STATE_EMPTY_NO_WAITERS); } if (dest) { *dest = ret; } @@ -1002,7 +1002,7 @@ static inline void qthread_syncvar_gotlock_fill(qthread_shepherd_t *shep, int API_FUNC qthread_syncvar_writeF(syncvar_t *restrict dest, uint64_t const *restrict src) { /*{{{ */ assert(qthread_library_initialized); - eflags_t e = {0, 0, 0, 0, 0}; + eflags_t e = {0u, 0u, 0u, 0u, 0u}; uint64_t ret = *src; qthread_shepherd_t *shep = qthread_internal_getshep(); @@ -1012,15 +1012,15 @@ int API_FUNC qthread_syncvar_writeF(syncvar_t *restrict dest, return qthread_syncvar_nonblocker_func(dest, (void *)src, WRITEF); } qthread_mwaitc(dest, SYNCFEB_ANY, INT_MAX, &e); - qassert_ret(e.cf == 0, - QTHREAD_TIMEOUT); /* there better not have been a timeout */ - if ((e.pf == 1) && (e.sf == 1)) { /* there are waiters to release */ + qassert_ret(e.cf == 0u, + QTHREAD_TIMEOUT); /* there better not have been a timeout */ + if ((e.pf == 1u) && (e.sf == 1u)) { /* there are waiters to release */ int const lockbin = QTHREAD_CHOOSE_STRIPE(dest); qthread_addrstat_t *m; QTHREAD_COUNT_THREADS_BINCOUNTER(febs, lockbin); - e.sf = 0; // I'm releasing waiters - e.pf = 0; // I'm going to mark this as full + e.sf = 0u; // I'm releasing waiters + e.pf = 0u; // I'm going to mark this as full #ifdef LOCK_FREE_FEBS do { m = (qthread_addrstat_t *)qt_hash_get(syncvars[lockbin], (void *)dest); @@ -1054,12 +1054,12 @@ int API_FUNC qthread_syncvar_writeF(syncvar_t *restrict dest, QTHREAD_FASTLOCK_LOCK(&(m->lock)); #endif /* ifdef LOCK_FREE_FEBS */ if (m->FEQ) { - e.pf = 1; // back to being empty + e.pf = 1u; // back to being empty if (m->FEQ->next) { - e.sf = 1; // only one will be dequeued, so it'll still have waiters + e.sf = 1u; // only one will be dequeued, so it'll still have waiters } } - UNLOCK_THIS_MODIFIED_SYNCVAR(dest, ret, (e.pf << 1) | e.sf); + UNLOCK_THIS_MODIFIED_SYNCVAR(dest, ret, (e.pf << 1u) | e.sf); assert(m->FFQ || m->FEQ); // otherwise there weren't really any waiters assert(m->EFQ == NULL); // someone snuck in! qthread_syncvar_gotlock_fill(shep, m, dest, ret); @@ -1079,7 +1079,7 @@ int API_FUNC qthread_syncvar_writeF_const(syncvar_t *restrict dest, int API_FUNC qthread_syncvar_writeEF(syncvar_t *restrict dest, uint64_t const *restrict src) { /*{{{ */ assert(qthread_library_initialized); - eflags_t e = {0, 0, 0, 0, 0}; + eflags_t e = {0u, 0u, 0u, 0u, 0u}; int const lockbin = QTHREAD_CHOOSE_STRIPE(dest); qthread_t *me = qthread_internal_self(); @@ -1092,10 +1092,10 @@ int API_FUNC qthread_syncvar_writeEF(syncvar_t *restrict dest, qthread_addrres_t *X; uint64_t ret = qthread_mwaitc(dest, SYNCFEB_ANY, INT_MAX, &e); - qassert_ret(e.cf == 0, + qassert_ret(e.cf == 0u, QTHREAD_TIMEOUT); /* there better not have been a timeout */ - if (e.pf == 1) { /* it got empty! */ - if (e.sf == 1) { /* not just empty, but with waiters! */ + if (e.pf == 1u) { /* it got empty! */ + if (e.sf == 1u) { /* not just empty, but with waiters! 
*/ goto locked_empty_waiters; } else { goto locked_empty; @@ -1155,13 +1155,13 @@ int API_FUNC qthread_syncvar_writeEF(syncvar_t *restrict dest, &me->thread_state, QTHREAD_STATE_FEB_BLOCKED, memory_order_relaxed); me->rdata->blockedon.addr = m; qthread_back_to_master(me); - } else if (e.sf == 1) { /* there are waiters to release! */ + } else if (e.sf == 1u) { /* there are waiters to release! */ qthread_addrstat_t *m; locked_empty_waiters: - assert(e.pf == 1); // otherwise it wasn't really empty - e.pf = 0; // mark full - e.sf = 0; // released! + assert(e.pf == 1u); // otherwise it wasn't really empty + e.pf = 0u; // mark full + e.sf = 0u; // released! #ifdef LOCK_FREE_FEBS do { m = (qthread_addrstat_t *)qt_hash_get(syncvars[lockbin], (void *)dest); @@ -1197,21 +1197,21 @@ int API_FUNC qthread_syncvar_writeEF(syncvar_t *restrict dest, assert(m->FFQ || m->FEQ); // otherwise there weren't really any waiters assert(m->EFQ == NULL); // someone snuck in! if (m->FEQ) { - e.pf = 1; - if (m->FEQ->next) { e.sf = 1; } + e.pf = 1u; + if (m->FEQ->next) { e.sf = 1u; } } { uint64_t val = *src; - UNLOCK_THIS_MODIFIED_SYNCVAR(dest, val, (e.pf << 1) | e.sf); + UNLOCK_THIS_MODIFIED_SYNCVAR(dest, val, (e.pf << 1u) | e.sf); qthread_syncvar_gotlock_fill(me->rdata->shepherd_ptr, m, dest, val); } } else { uint64_t val; locked_empty: - assert(e.pf == 1); // otherwise it wasn't really empty - assert(e.sf == 0); + assert(e.pf == 1u); // otherwise it wasn't really empty + assert(e.sf == 0u); val = *src; - // e.pf = 0; // now mark it full + // e.pf = 0u; // now mark it full UNLOCK_THIS_MODIFIED_SYNCVAR(dest, val, SYNCFEB_STATE_FULL_NO_WAITERS); } return QTHREAD_SUCCESS; @@ -1225,7 +1225,7 @@ int API_FUNC qthread_syncvar_writeEF_const(syncvar_t *restrict dest, int qthread_syncvar_writeEF_nb(syncvar_t *restrict dest, uint64_t const *restrict src) { /*{{{ */ - eflags_t e = {0, 0, 0, 0, 0}; + eflags_t e = {0u, 0u, 0u, 0u, 0u}; int const lockbin = QTHREAD_CHOOSE_STRIPE(dest); qthread_t *me = qthread_internal_self(); @@ -1237,13 +1237,13 @@ int qthread_syncvar_writeEF_nb(syncvar_t *restrict dest, (void)qthread_mwaitc(dest, SYNCFEB_EMPTY, 1, &e); if (e.cf) { /* there was a timeout */ return QTHREAD_OPFAIL; - } else if (e.sf == 1) { /* there are waiters to release! */ + } else if (e.sf == 1u) { /* there are waiters to release! */ qthread_addrstat_t *m; QTHREAD_COUNT_THREADS_BINCOUNTER(febs, lockbin); - assert(e.pf == 1); // otherwise it wasn't really empty - e.pf = 0; // mark full - e.sf = 0; // released! + assert(e.pf == 1u); // otherwise it wasn't really empty + e.pf = 0u; // mark full + e.sf = 0u; // released! #ifdef LOCK_FREE_FEBS do { m = (qthread_addrstat_t *)qt_hash_get(syncvars[lockbin], (void *)dest); @@ -1279,20 +1279,20 @@ int qthread_syncvar_writeEF_nb(syncvar_t *restrict dest, assert(m->FFQ || m->FEQ); // otherwise there weren't really any waiters assert(m->EFQ == NULL); // someone snuck in! 
if (m->FEQ) { - e.pf = 1; - if (m->FEQ->next) { e.sf = 1; } + e.pf = 1u; + if (m->FEQ->next) { e.sf = 1u; } } { uint64_t val = *src; - UNLOCK_THIS_MODIFIED_SYNCVAR(dest, val, (e.pf << 1) | e.sf); + UNLOCK_THIS_MODIFIED_SYNCVAR(dest, val, (e.pf << 1u) | e.sf); qthread_syncvar_gotlock_fill(me->rdata->shepherd_ptr, m, dest, val); } } else { uint64_t val; - assert(e.pf == 1); // otherwise it wasn't really empty - assert(e.sf == 0); + assert(e.pf == 1u); // otherwise it wasn't really empty + assert(e.sf == 0u); val = *src; - // e.pf = 0; // now mark it full + // e.pf = 0u; // now mark it full UNLOCK_THIS_MODIFIED_SYNCVAR(dest, val, SYNCFEB_STATE_FULL_NO_WAITERS); } return QTHREAD_SUCCESS; @@ -1306,22 +1306,23 @@ int API_FUNC qthread_syncvar_writeEF_const_nb(syncvar_t *restrict dest, uint64_t API_FUNC qthread_syncvar_incrF(syncvar_t *restrict operand, uint64_t const inc) { /*{{{ */ assert(qthread_library_initialized); - eflags_t e = {0, 0, 0, 0, 0}; + eflags_t e = {0u, 0u, 0u, 0u, 0u}; uint64_t newv; qthread_t *me = qthread_internal_self(); assert(operand); if (!me) { return qthread_syncvar_blocker_func(operand, (void *)&inc, INCR); } qthread_mwaitc(operand, SYNCFEB_ANY, INT_MAX, &e); - qassert_ret(e.cf == 0, - QTHREAD_TIMEOUT); /* there better not have been a timeout */ - if ((e.pf == 1) && (e.sf == 1)) { /* there are waiters to release */ + qassert_ret( + e.cf == 0u, + (uint64_t)QTHREAD_TIMEOUT); /* there better not have been a timeout */ + if ((e.pf == 1u) && (e.sf == 1u)) { /* there are waiters to release */ int const lockbin = QTHREAD_CHOOSE_STRIPE(operand); qthread_addrstat_t *m; QTHREAD_COUNT_THREADS_BINCOUNTER(febs, lockbin); - e.sf = 0; // I'm releasing waiters - e.pf = 0; // I'm going to mark this as full + e.sf = 0u; // I'm releasing waiters + e.pf = 0u; // I'm going to mark this as full #ifdef LOCK_FREE_FEBS do { m = (qthread_addrstat_t *)qt_hash_get(syncvars[lockbin], (void *)operand); @@ -1355,19 +1356,19 @@ uint64_t API_FUNC qthread_syncvar_incrF(syncvar_t *restrict operand, QTHREAD_FASTLOCK_LOCK(&(m->lock)); #endif /* ifdef LOCK_FREE_FEBS */ if (m->FEQ) { - e.pf = 1; // back to being empty + e.pf = 1u; // back to being empty if (m->FEQ->next) { - e.sf = 1; // only one will be dequeued, so it'll still have waiters + e.sf = 1u; // only one will be dequeued, so it'll still have waiters } } newv = operand->u.s.data + inc; - UNLOCK_THIS_MODIFIED_SYNCVAR(operand, newv, (e.pf << 1) | e.sf); + UNLOCK_THIS_MODIFIED_SYNCVAR(operand, newv, (e.pf << 1u) | e.sf); assert(m->FFQ || m->EFQ); // otherwise there weren't really any waiters assert(m->FEQ == NULL); // someone snuck in! qthread_syncvar_gotlock_fill(me->rdata->shepherd_ptr, m, operand, newv); } else { newv = operand->u.s.data + inc; - UNLOCK_THIS_MODIFIED_SYNCVAR(operand, newv, (e.pf << 1) | e.sf); + UNLOCK_THIS_MODIFIED_SYNCVAR(operand, newv, (e.pf << 1u) | e.sf); } return newv; diff --git a/src/teams.c b/src/teams.c index 6b1c1346a..a0ad0607e 100644 --- a/src/teams.c +++ b/src/teams.c @@ -112,7 +112,8 @@ void INTERNAL qt_internal_teamfinish(qt_team_t *team, FREE_TEAM(team); - qthread_internal_incr(&(qlib->team_count), &qlib->team_count_lock, -1); + qthread_internal_incr( + &(qlib->team_count), &qlib->team_count_lock, (aligned_t)-1); } else { // 1.2. 
This task is a subteam leader assert(team->sinc); @@ -178,7 +179,8 @@ void INTERNAL qt_internal_teamfinish(qt_team_t *team, FREE_TEAM(team); - qthread_internal_incr(&(qlib->team_count), &qlib->team_count_lock, -1); + qthread_internal_incr( + &(qlib->team_count), &qlib->team_count_lock, (aligned_t)-1); } } else { diff --git a/src/threadqueues/nemesis_threadqueues.c b/src/threadqueues/nemesis_threadqueues.c index 3fadd3b10..cee6ffbcd 100644 --- a/src/threadqueues/nemesis_threadqueues.c +++ b/src/threadqueues/nemesis_threadqueues.c @@ -121,7 +121,7 @@ qt_internal_NEMESIS_dequeue(NEMESIS_queue *q) { /*{{{ */ atomic_store_explicit(&q->head, NULL, memory_order_relaxed); } - qt_threadqueue_node_t *const retval = (void *volatile)(q->shadow_head); + qt_threadqueue_node_t *const retval = (void *)(q->shadow_head); if ((retval != NULL) && (retval != (void *)1)) { struct _qt_threadqueue_node *next_loc = @@ -155,7 +155,7 @@ qt_internal_NEMESIS_dequeue_st(NEMESIS_queue *q) { /*{{{ */ atomic_store_explicit(&q->head, NULL, memory_order_relaxed); } - qt_threadqueue_node_t *const retval = (void *volatile)(q->shadow_head); + qt_threadqueue_node_t *const retval = (void *)(q->shadow_head); if ((retval != NULL) && (retval != (void *)1)) { void *retval_next_tmp = @@ -180,7 +180,7 @@ void INTERNAL qt_threadqueue_free(qt_threadqueue_t *q) { /*{{{ */ if (node) { qthread_t *retval = node->thread; assert(atomic_load_explicit(&node->next, memory_order_relaxed) == NULL); - (void)qthread_incr(&(q->advisory_queuelen), -1); + (void)qthread_incr(&(q->advisory_queuelen), (aligned_t)-1); FREE_TQNODE(node); qthread_thread_free(retval); } else { @@ -297,7 +297,7 @@ qt_scheduler_get_thread(qt_threadqueue_t *q, } assert(node); assert(atomic_load_explicit(&node->next, memory_order_relaxed) == NULL); - (void)qthread_incr(&(q->advisory_queuelen), -1); + (void)qthread_incr(&(q->advisory_queuelen), (aligned_t)-1); retval = node->thread; FREE_TQNODE(node); return retval; diff --git a/src/touch.c b/src/touch.c index 40a5536a1..988678b14 100644 --- a/src/touch.c +++ b/src/touch.c @@ -12,24 +12,10 @@ void qthread_run_needed_task(syncvar_t *value) { qthread_shepherd_t *shep = qthread_internal_getshep(); - qthread_t *target = NULL; qthread_t *orig_t = qthread_internal_self(); - // ucontext_t my_context; - - if ((target = qt_threadqueue_dequeue_specific(shep->ready, value))) { + if (qt_threadqueue_dequeue_specific(shep->ready, value)) { // switch to task and run -- else missing and return - // qthread_t * t = n->value; - - // getcontext(&my_context); // done inside qthread_exec - // t->rdata->return_context = &my_context; // done inside qthread_exec - // qassert(qthread_writeEF_const(t, (aligned_t*)t->ret, (t->f) (t->arg, - // t->arg)), QTHREAD_SUCCESS); - /* note: there's a good argument that the following should - * be: (*t->f)(t), however the state management would be - * more complex - */ - // qthread_exec(t, &my_context); qthread_back_to_master(orig_t); } } diff --git a/test/basics/qtimer.c b/test/basics/qtimer.c index 973e9c4a9..cfca15f2b 100644 --- a/test/basics/qtimer.c +++ b/test/basics/qtimer.c @@ -5,7 +5,7 @@ #include #include -unsigned int const ITER = 100; +#define ITER 100ull static int dcmp(void const *a, void const *b) { if (*(double *)a > *(double *)b) { diff --git a/test/features/allpairs.c b/test/features/allpairs.c index aea2281f5..130cfcbb2 100644 --- a/test/features/allpairs.c +++ b/test/features/allpairs.c @@ -49,8 +49,8 @@ static void printout(int *restrict * restrict out) static void mult(void const *inta_void, void 
const *intb_void, void *restrict out_void) { - int *const inta = (int *const)inta_void; - int *const intb = (int *const)intb_void; + int *const inta = (int *)inta_void; + int *const intb = (int *)intb_void; int *restrict out = out_void; test_check(*out == -1); *out = (*inta) * (*intb); diff --git a/test/features/subteams.c b/test/features/subteams.c index a186eed64..52c6ff4d8 100644 --- a/test/features/subteams.c +++ b/test/features/subteams.c @@ -97,7 +97,7 @@ static aligned_t test_async(void *args_) { } } - qthread_incr(&donecount, -1); + qthread_incr(&donecount, (aligned_t)-1); return 0; } diff --git a/test/stress/feb_stream.c b/test/stress/feb_stream.c index fabb6b6d7..40ace3e42 100644 --- a/test/stress/feb_stream.c +++ b/test/stress/feb_stream.c @@ -33,7 +33,7 @@ static aligned_t producer(void *arg) { qthread_writeEF_const(&buff[buffInd], i); iprintf("producer wrote value #%u\n", i); } - qthread_writeEF_const(&buff[numItems % bufferSize], -1); + qthread_writeEF_const(&buff[numItems % bufferSize], (aligned_t)-1); iprintf("producer wrote terminus value #%" PRIu64 "\n", -1); return 0; diff --git a/test/stress/syncvar_stream.c b/test/stress/syncvar_stream.c index 17f094e01..51b945ee4 100644 --- a/test/stress/syncvar_stream.c +++ b/test/stress/syncvar_stream.c @@ -33,8 +33,10 @@ static aligned_t producer(void *arg) { qthread_syncvar_writeEF_const(&buff[buffInd], i); iprintf("producer wrote value #%u\n", i); } - qthread_syncvar_writeEF_const(&buff[numItems % bufferSize], INT64TOINT60(-1)); - iprintf("producer wrote terminus value #%" PRIu64 "\n", INT64TOINT60(-1)); + qthread_syncvar_writeEF_const(&buff[numItems % bufferSize], + INT64TOINT60((aligned_t)-1)); + iprintf("producer wrote terminus value #%" PRIu64 "\n", + INT64TOINT60((aligned_t)-1)); return 0; } From d8a269062baa62c7bd0a399b6a996ca5587fdcda Mon Sep 17 00:00:00 2001 From: Ian Henriksen Date: Mon, 7 Oct 2024 13:49:45 -0600 Subject: [PATCH 4/5] Fix an additional warning that shows up with the latest clang. 
--- test/basics/read.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/test/basics/read.c b/test/basics/read.c index 0b7fc6eba..e4095d1f9 100644 --- a/test/basics/read.c +++ b/test/basics/read.c @@ -5,12 +5,13 @@ #include char const cfilename[] = "test_qthread_read.XXXXXXXXXXXXX"; +#define BUFFER_SIZE 32u // strlen(cfilename) + 1 static aligned_t reader(void *arg) { int ret; - char buf[strlen(cfilename) + 1]; + char buf[BUFFER_SIZE]; - memset(buf, 0, strlen(cfilename) + 1); + memset(buf, 0, BUFFER_SIZE); iprintf("in reader function\n"); ret = read((int)(intptr_t)arg, buf, 5); iprintf("read '%s'\n", buf); @@ -28,12 +29,12 @@ static aligned_t reader(void *arg) { int main(int argc, char *argv[]) { aligned_t t; int fd; - char filename[strlen(cfilename) + 1]; + char filename[BUFFER_SIZE]; ssize_t ret; CHECK_VERBOSE(); - snprintf(filename, strlen(cfilename) + 1, "%s", cfilename); + snprintf(filename, BUFFER_SIZE, "%s", cfilename); iprintf("filename = '%s'\n", filename); /* First, set up a temporary file */ @@ -43,8 +44,8 @@ int main(int argc, char *argv[]) { if (fd < 0) { perror("mkstemp failed"); } test_check(fd >= 0); - ret = write(fd, cfilename, strlen(cfilename)); - test_check(ret == strlen(cfilename)); + ret = write(fd, cfilename, BUFFER_SIZE - 1u); + test_check(ret == BUFFER_SIZE - 1u); ret = (ssize_t)lseek(fd, 0, SEEK_SET); test_check(ret == 0); From 9572788ae3bf04a1c83f4a60d36c887f0bf8de30 Mon Sep 17 00:00:00 2001 From: Ian Henriksen Date: Mon, 7 Oct 2024 15:12:32 -0600 Subject: [PATCH 5/5] Auto-cancel github actions workflow if a newer commit gets pushed to a given branch/PR. --- .github/workflows/CI.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index a3b5936c9..8a5187c64 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -2,6 +2,12 @@ name: CI on: [push, pull_request] +# Cancel the current workflow if a newer commit gets pushed to the +# branch or PR that triggered it. +concurrency: + group: ${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/master' }} + jobs: linux-gcc: runs-on: ubuntu-22.04
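
Note on the `(aligned_t)-1` casts introduced throughout PATCH 3/5: they silence nvc's signed-to-unsigned conversion warnings without changing behavior, since converting -1 to an unsigned type yields that type's maximum value and adding it wraps around to a decrement. The following standalone sketch is not part of the patch series; it uses a plain uint64_t as a stand-in for qthreads' aligned_t (an assumption for illustration only) to show the equivalence.

    #include <assert.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void) {
      /* Stand-in for aligned_t, assumed here to be an unsigned 64-bit type. */
      uint64_t counter = 5u;

      /* (uint64_t)-1 is UINT64_MAX; adding it performs a modular decrement,
       * exactly as the old implicit conversion of the int literal -1 did,
       * but without triggering a sign-conversion warning. */
      counter += (uint64_t)-1;
      assert(counter == 4u);

      counter += (uint64_t)-1;
      printf("counter = %" PRIu64 "\n", counter); /* prints 3 */
      return 0;
    }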