Skip to content
Permalink
Browse files
locking/atomic/x86: Introduce arch_try_cmpxchg64()
Add arch_try_cmpxchg64(), similar to arch_try_cmpxchg(), that
operates on 64-bit operands. This function provides the same
interface for 32-bit and 64-bit targets.

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
  • Loading branch information
ubizjak authored and intel-lab-lkp committed Dec 15, 2020
1 parent a03cc52 commit 60a11e7e63e120b5fd41b5346cf5a05ea71c7cb2
Show file tree
Hide file tree
Showing 2 changed files with 59 additions and 9 deletions.
@@ -35,15 +35,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 value)
: "memory");
}

#ifdef CONFIG_X86_CMPXCHG64
/*
 * Native 64-bit cmpxchg wrappers for CPUs that provide cmpxchg8b:
 * widen the old/new operands to unsigned long long, delegate to the
 * __cmpxchg64*() helpers and cast the returned previous value back
 * to the pointee type of @p.
 */
#define arch_cmpxchg64(p, old, new) \
	((__typeof__(*(p)))__cmpxchg64((p), \
				       (unsigned long long)(old), \
				       (unsigned long long)(new)))
#define arch_cmpxchg64_local(p, old, new) \
	((__typeof__(*(p)))__cmpxchg64_local((p), \
					     (unsigned long long)(old), \
					     (unsigned long long)(new)))
#endif

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
u64 prev;
@@ -71,6 +62,39 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
}

#ifndef CONFIG_X86_CMPXCHG64
#define arch_cmpxchg64(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
(unsigned long long)(n)))
#define arch_cmpxchg64_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \

#define __raw_try_cmpxchg64(_ptr, _pold, _new, lock) \
({ \
bool success; \
__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
__typeof__(*(_ptr)) __old = *_old; \
__typeof__(*(_ptr)) __new = (_new); \
asm volatile(lock "cmpxchg8b %1" \
CC_SET(z) \
: CC_OUT(z) (success), \
"+m" (*_ptr), \
"+A" (__old) \
: "b" ((unsigned int)__new), \
"c" ((unsigned int)(__new>>32)) \
: "memory"); \
if (unlikely(!success)) \
*_old = __old; \
likely(success); \
})

/* SMP-safe flavour: expand the raw helper with the bus-lock prefix. */
#define __try_cmpxchg64(p, po, n) \
	__raw_try_cmpxchg64((p), (po), (n), LOCK_PREFIX)

/* Architecture entry point picked up by the generic try_cmpxchg64() layer. */
#define arch_try_cmpxchg64(p, po, n) \
	__try_cmpxchg64((p), (po), (n))

#else

/*
* Building a kernel capable of running on the 80386 and 80486. It may be
* necessary to emulate cmpxchg8b on those CPUs, which lack the instruction.
@@ -108,6 +132,26 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
: "memory"); \
__ret; })

#define arch_try_cmpxchg64(ptr, po, n) \
({ \
bool success; \
__typeof__(ptr) _old = (__typeof__(ptr))(po); \
__typeof__(*(ptr)) __old = *_old; \
__typeof__(*(ptr)) __new = (n); \
alternative_io(LOCK_PREFIX_HERE \
"call cmpxchg8b_emu", \
"lock; cmpxchg8b (%%esi)" , \
X86_FEATURE_CX8, \
"+A" (__old), \
"S" ((ptr)), \
"b" ((unsigned int)__new), \
"c" ((unsigned int)(__new>>32)) \
: "memory"); \
success = (__old == *_old); \
if (unlikely(!success)) \
*_old = __old; \
likely(success); \
})
#endif

/* Double-word cmpxchg needs cmpxchg8b, i.e. the CX8 CPU feature, on 32-bit. */
#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8)
@@ -19,6 +19,12 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
arch_cmpxchg_local((ptr), (o), (n)); \
})

/*
 * On 64-bit, a 64-bit try_cmpxchg is just the word-sized
 * arch_try_cmpxchg(); reject any pointee that is not exactly
 * eight bytes at build time.
 */
#define arch_try_cmpxchg64(p, o, n) \
({ \
	BUILD_BUG_ON(sizeof(*(p)) != 8); \
	arch_try_cmpxchg((p), (o), (n)); \
})

/* Double-word cmpxchg needs cmpxchg16b, i.e. the CX16 CPU feature, on 64-bit. */
#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16)

#endif /* _ASM_X86_CMPXCHG_64_H */

0 comments on commit 60a11e7

Please sign in to comment.