lib/ovs-atomic: Add missing macro argument parentheses.
Otherwise the dereference operator could target a portion of a ternary
expression, for example.

Also minor style fixes.

Signed-off-by: Jarno Rajahalme <jrajahalme@nicira.com>
Acked-by: Ben Pfaff <blp@nicira.com>
Jarno Rajahalme committed Aug 29, 2014
1 parent 985998e commit 0b83904
Showing 3 changed files with 27 additions and 27 deletions.
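For context, the hazard named in the commit message can be reproduced outside OVS with a minimal sketch. The macro and variable names below are hypothetical, not taken from the tree; the point is only that an unparenthesized use such as sizeof(*DST) lets the dereference bind to the first token of a ternary argument instead of the whole expression:

    /* Minimal sketch; ELEM_SIZE_* and the variables are hypothetical, not OVS code. */
    #include <stdio.h>

    #define ELEM_SIZE_BAD(PTR)  sizeof(*PTR)    /* '*' binds to the first token only */
    #define ELEM_SIZE_GOOD(PTR) sizeof(*(PTR))  /* '*' covers the whole argument */

    int
    main(void)
    {
        short a = 1, b = 2;
        short *p = &a, *q = &b;
        int flag = 1;
        int *pick = &flag;

        /* ELEM_SIZE_BAD(pick ? p : q) expands to sizeof(*pick ? p : q), which
         * parses as sizeof((*pick) ? p : q): it dereferences 'pick' and yields
         * the size of a pointer.  ELEM_SIZE_GOOD expands to
         * sizeof(*(pick ? p : q)) and yields sizeof(short), as intended. */
        printf("bad:  %zu\n", ELEM_SIZE_BAD(pick ? p : q));
        printf("good: %zu\n", ELEM_SIZE_GOOD(pick ? p : q));
        return 0;
    }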
8 changes: 4 additions & 4 deletions lib/ovs-atomic-gcc4+.h
@@ -83,7 +83,7 @@ atomic_signal_fence(memory_order order)
         \
         if (IS_LOCKLESS_ATOMIC(*dst__)) { \
             atomic_thread_fence(ORDER); \
-            *(typeof(*DST) volatile *)dst__ = src__; \
+            *(typeof(*(DST)) volatile *)dst__ = src__; \
             atomic_thread_fence_if_seq_cst(ORDER); \
         } else { \
             atomic_store_locked(dst__, src__); \
@@ -99,7 +99,7 @@ atomic_signal_fence(memory_order order)
         \
         if (IS_LOCKLESS_ATOMIC(*src__)) { \
             atomic_thread_fence_if_seq_cst(ORDER); \
-            *dst__ = *(typeof(*SRC) volatile *)src__; \
+            *dst__ = *(typeof(*(SRC)) volatile *)src__; \
         } else { \
             atomic_read_locked(src__, dst__); \
         } \
@@ -128,7 +128,6 @@ atomic_signal_fence(memory_order order)
 #define atomic_compare_exchange_weak_explicit \
     atomic_compare_exchange_strong_explicit
 
-
 #define atomic_op__(RMW, OP, ARG, ORIG) \
     ({ \
         typeof(RMW) rmw__ = (RMW); \
@@ -140,11 +139,12 @@ atomic_signal_fence(memory_order order)
         } else { \
             atomic_op_locked(rmw__, OP, arg__, orig__); \
         } \
+        (void) 0; \
     })
 
 #define atomic_add(RMW, ARG, ORIG) atomic_op__(RMW, add, ARG, ORIG)
 #define atomic_sub(RMW, ARG, ORIG) atomic_op__(RMW, sub, ARG, ORIG)
-#define atomic_or( RMW, ARG, ORIG) atomic_op__(RMW, or, ARG, ORIG)
+#define atomic_or(RMW, ARG, ORIG) atomic_op__(RMW, or, ARG, ORIG)
 #define atomic_xor(RMW, ARG, ORIG) atomic_op__(RMW, xor, ARG, ORIG)
 #define atomic_and(RMW, ARG, ORIG) atomic_op__(RMW, and, ARG, ORIG)

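Besides the parentheses, one of the "minor style fixes" above is the trailing (void) 0; added to atomic_op__. In a GCC statement expression the last statement supplies the value of the whole construct, and the construct has type void when the last thing is not an expression, so the added line spells out explicitly that atomic_op__ yields no usable value. A compact sketch of that mechanism, with illustrative COUNT_UP_* macros that are not OVS code:

    /* Sketch of GCC statement-expression values; COUNT_UP_* are illustrative. */
    #include <stdio.h>

    #define COUNT_UP_VALUE(X) ({ (X) += 1; (X); })       /* yields the new value */
    #define COUNT_UP_VOID(X)  ({ (X) += 1; (void) 0; })  /* yields nothing usable */

    int
    main(void)
    {
        int n = 0;

        int v = COUNT_UP_VALUE(n);      /* OK: the expression evaluates to 'n' */
        COUNT_UP_VOID(n);               /* fine as a plain statement */
        /* int w = COUNT_UP_VOID(n); */ /* would not compile: void value */

        printf("n=%d v=%d\n", n, v);    /* prints "n=2 v=1" */
        return 0;
    }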
32 changes: 16 additions & 16 deletions lib/ovs-atomic-i586.h
@@ -209,10 +209,10 @@ atomic_signal_fence(memory_order order)
 #define atomic_exchange__(DST, SRC, ORDER) \
     ({ \
         typeof(DST) dst___ = (DST); \
-        typeof(*DST) src___ = (SRC); \
+        typeof(*(DST)) src___ = (SRC); \
         \
         if ((ORDER) > memory_order_consume) { \
-            if (sizeof(*DST) == 8) { \
+            if (sizeof(*(DST)) == 8) { \
                 atomic_exchange_8__(dst___, src___, "memory"); \
             } else { \
                 asm volatile("xchg %1,%0 ; " \
@@ -222,7 +222,7 @@ atomic_signal_fence(memory_order order)
                              :: "memory"); \
             } \
         } else { \
-            if (sizeof(*DST) == 8) { \
+            if (sizeof(*(DST)) == 8) { \
                 atomic_exchange_8__(dst___, src___, "cc"); \
             } else { \
                 asm volatile("xchg %1,%0 ; " \
@@ -237,10 +237,10 @@ atomic_signal_fence(memory_order order)
 #define atomic_store_explicit(DST, SRC, ORDER) \
     ({ \
         typeof(DST) dst__ = (DST); \
-        typeof(*DST) src__ = (SRC); \
+        typeof(*(DST)) src__ = (SRC); \
         \
         if ((ORDER) != memory_order_seq_cst \
-            && sizeof(*DST) <= 4) { \
+            && sizeof(*(DST)) <= 4) { \
             atomic_compiler_barrier(ORDER); \
             *dst__ = src__; \
         } else { \
@@ -259,10 +259,10 @@ atomic_signal_fence(memory_order order)
         typeof(DST) dst__ = (DST); \
         typeof(SRC) src__ = (SRC); \
         \
-        if (sizeof(*DST) <= 4) { \
+        if (sizeof(*(DST)) <= 4) { \
             *dst__ = *src__; \
         } else { \
-            typeof(*DST) res__; \
+            typeof(*(DST)) res__; \
             \
             asm volatile("    movl %%ebx,%%eax ; " \
                          "    movl %%ecx,%%edx ; " \
@@ -325,21 +325,21 @@ atomic_signal_fence(memory_order order)
     ({ \
         typeof(DST) dst__ = (DST); \
         typeof(DST) expp__ = (EXP); \
-        typeof(*DST) src__ = (SRC); \
-        typeof(*DST) exp__ = *expp__; \
+        typeof(*(DST)) src__ = (SRC); \
+        typeof(*(DST)) exp__ = *expp__; \
         uint8_t res__; \
         (void)ORD_FAIL; \
         \
         if ((ORDER) > memory_order_consume) { \
-            if (sizeof(*DST) <= 4) { \
+            if (sizeof(*(DST)) <= 4) { \
                 atomic_compare_exchange__(dst__, exp__, src__, res__, \
                                           "memory"); \
             } else { \
                 atomic_compare_exchange_8__(dst__, exp__, src__, res__, \
                                             "memory"); \
             } \
         } else { \
-            if (sizeof(*DST) <= 4) { \
+            if (sizeof(*(DST)) <= 4) { \
                 atomic_compare_exchange__(dst__, exp__, src__, res__, \
                                           "cc"); \
             } else { \
@@ -371,7 +371,7 @@ atomic_signal_fence(memory_order order)
 #define atomic_add_32__(RMW, ARG, ORIG, ORDER) \
     ({ \
         typeof(RMW) rmw__ = (RMW); \
-        typeof(*RMW) arg__ = (ARG); \
+        typeof(*(RMW)) arg__ = (ARG); \
         \
         if ((ORDER) > memory_order_consume) { \
             atomic_add__(rmw__, arg__, "memory"); \
@@ -388,7 +388,7 @@ atomic_signal_fence(memory_order order)
         typeof(RMW) rmw__ = (RMW); \
         typeof(ARG) arg__ = (ARG); \
         \
-        typeof(*RMW) val__; \
+        typeof(*(RMW)) val__; \
         \
         atomic_read_explicit(rmw__, &val__, memory_order_relaxed); \
         do { \
@@ -400,22 +400,22 @@ atomic_signal_fence(memory_order order)
     })
 
 #define atomic_add_explicit(RMW, ARG, ORIG, ORDER) \
-    (sizeof(*RMW) <= 4 \
+    (sizeof(*(RMW)) <= 4 \
      ? atomic_add_32__(RMW, ARG, ORIG, ORDER) \
      : atomic_op__(RMW, +, ARG, ORIG, ORDER))
 #define atomic_add(RMW, ARG, ORIG) \
     atomic_add_explicit(RMW, ARG, ORIG, memory_order_seq_cst)
 
 #define atomic_sub_explicit(RMW, ARG, ORIG, ORDER) \
-    (sizeof(*RMW) <= 4 \
+    (sizeof(*(RMW)) <= 4 \
      ? atomic_add_32__(RMW, -(ARG), ORIG, ORDER) \
      : atomic_op__(RMW, -, ARG, ORIG, ORDER))
 #define atomic_sub(RMW, ARG, ORIG) \
     atomic_sub_explicit(RMW, ARG, ORIG, memory_order_seq_cst)
 
 #define atomic_or_explicit(RMW, ARG, ORIG, ORDER) \
     atomic_op__(RMW, |, ARG, ORIG, ORDER)
-#define atomic_or( RMW, ARG, ORIG) \
+#define atomic_or(RMW, ARG, ORIG) \
     atomic_or_explicit(RMW, ARG, ORIG, memory_order_seq_cst)
 
 #define atomic_xor_explicit(RMW, ARG, ORIG, ORDER) \
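In the i586 header the same unparenthesized pattern also sat inside compile-time size dispatch: with the fix, sizeof(*(RMW)) <= 4 in atomic_add_explicit above safely selects between atomic_add_32__ and the generic atomic_op__ even when the argument is itself a composite expression. A sketch of that dispatch idiom with hypothetical names (not OVS code):

    /* Sketch of dispatch on operand size, as in atomic_add_explicit above;
     * PICK_PATH and the variables are hypothetical, not OVS code. */
    #include <stdio.h>

    #define PICK_PATH(RMW) \
        (sizeof(*(RMW)) <= 4 ? "32-bit path" : "generic fallback")

    int
    main(void)
    {
        int counter32 = 0, other32 = 0;
        long long counter64 = 0;
        int use_first = 1;

        puts(PICK_PATH(&counter32));    /* "32-bit path" */
        puts(PICK_PATH(&counter64));    /* "generic fallback" */

        /* With the argument parenthesized, even a ternary argument sizes the
         * pointed-to object; without the parentheses this expansion would
         * misparse as described in the commit message. */
        puts(PICK_PATH(use_first ? &counter32 : &other32));
        return 0;
    }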
14 changes: 7 additions & 7 deletions lib/ovs-atomic-x86_64.h
@@ -174,7 +174,7 @@ atomic_signal_fence(memory_order order)
 #define atomic_exchange__(DST, SRC, ORDER) \
     ({ \
         typeof(DST) dst___ = (DST); \
-        typeof(*DST) src___ = (SRC); \
+        typeof(*(DST)) src___ = (SRC); \
         \
         if ((ORDER) > memory_order_consume) { \
             asm volatile("xchg %1,%0 ; " \
@@ -198,7 +198,7 @@ atomic_signal_fence(memory_order order)
 #define atomic_store_explicit(DST, SRC, ORDER) \
     ({ \
         typeof(DST) dst__ = (DST); \
-        typeof(*DST) src__ = (SRC); \
+        typeof(*(DST)) src__ = (SRC); \
         \
         if ((ORDER) != memory_order_seq_cst) { \
             atomic_compiler_barrier(ORDER); \
@@ -248,8 +248,8 @@ atomic_signal_fence(memory_order order)
     ({ \
         typeof(DST) dst__ = (DST); \
         typeof(DST) expp__ = (EXP); \
-        typeof(*DST) src__ = (SRC); \
-        typeof(*DST) exp__ = *expp__; \
+        typeof(*(DST)) src__ = (SRC); \
+        typeof(*(DST)) exp__ = *expp__; \
         uint8_t res__; \
         (void)ORD_FAIL; \
         \
@@ -284,7 +284,7 @@ atomic_signal_fence(memory_order order)
 #define atomic_add_explicit(RMW, ARG, ORIG, ORDER) \
     ({ \
         typeof(RMW) rmw__ = (RMW); \
-        typeof(*RMW) arg__ = (ARG); \
+        typeof(*(RMW)) arg__ = (ARG); \
         \
         if ((ORDER) > memory_order_consume) { \
             atomic_add__(rmw__, arg__, "memory"); \
@@ -308,7 +308,7 @@ atomic_signal_fence(memory_order order)
         typeof(RMW) rmw__ = (RMW); \
         typeof(ARG) arg__ = (ARG); \
         \
-        typeof(*RMW) val__; \
+        typeof(*(RMW)) val__; \
         \
         atomic_read_explicit(rmw__, &val__, memory_order_relaxed); \
         do { \
@@ -321,7 +321,7 @@ atomic_signal_fence(memory_order order)
 
 #define atomic_or_explicit(RMW, ARG, ORIG, ORDER) \
     atomic_op__(RMW, |, ARG, ORIG, ORDER)
-#define atomic_or( RMW, ARG, ORIG) \
+#define atomic_or(RMW, ARG, ORIG) \
     atomic_or_explicit(RMW, ARG, ORIG, memory_order_seq_cst)
 
 #define atomic_xor_explicit(RMW, ARG, ORIG, ORDER) \
