bitops: unify non-atomic bitops prototypes across architectures
Currently, there is a mess with the prototypes of the non-atomic
bitops across the different architectures:

ret	bool, int, unsigned long
nr	int, long, unsigned int, unsigned long
addr	volatile unsigned long *, volatile void *

Thankfully, it doesn't provoke any bugs, but it can sometimes make
the compiler angry even though nothing is actually wrong.
Adjust all the prototypes to the following standard:

ret	bool				retval can be only 0 or 1
nr	unsigned long			native; signed makes no sense
addr	volatile unsigned long *	bitmaps are arrays of ulongs
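
For illustration, a test-and-set op conforming to this standard looks
roughly as below. This is only a sketch modeled on the generic bitops
implementation, not code added verbatim by this commit; BIT_MASK() and
BIT_WORD() are the existing helpers from <linux/bits.h>:

	static __always_inline bool
	generic___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
	{
		unsigned long mask = BIT_MASK(nr);
		unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
		unsigned long old = *p;

		/* non-atomic: plain read-modify-write, callers provide locking */
		*p = old | mask;
		return !!(old & mask);
	}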

Next, some architectures don't define 'arch_' versions as they don't
support instrumentation, others do. To make sure there is always the
same set of callables present and to ease any potential future
changes, make them all follow the rule:
 * architecture-specific files define only 'arch_' versions;
 * non-prefixed versions can be defined only in asm-generic files;
and place the non-prefixed definitions into a new file in
asm-generic to be included by non-instrumented architectures.
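
Conceptually, the new header just aliases the non-prefixed names to the
'arch_' ones. A minimal sketch of what
asm-generic/bitops/non-instrumented-non-atomic.h boils down to (the
exact in-tree contents may differ):

	#ifndef __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
	#define __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H

	/* non-instrumented architectures: non-prefixed == arch_ versions */
	#define __set_bit		arch___set_bit
	#define __clear_bit		arch___clear_bit
	#define __change_bit		arch___change_bit

	#define __test_and_set_bit	arch___test_and_set_bit
	#define __test_and_clear_bit	arch___test_and_clear_bit
	#define __test_and_change_bit	arch___test_and_change_bit

	#define test_bit		arch_test_bit

	#endif /* __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H */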

Finally, add some static assertions in order to prevent people from
making a mess in this room again.
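
A sketch of how such prototype checks can be expressed, assuming
reference generic_*() implementations with the canonical prototypes are
in place (static_assert() and __same_type() are existing kernel
facilities from <linux/build_bug.h> and <linux/compiler_types.h>):

	/* Check that the bitops prototypes are sane */
	#define __check_bitop_pr(name) \
		static_assert(__same_type(name, generic_##name))

	__check_bitop_pr(__set_bit);
	__check_bitop_pr(__test_and_set_bit);
	__check_bitop_pr(test_bit);
	/* ...and so on for the remaining non-atomic ops */

	#undef __check_bitop_pr
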
I also used the __always_inline attribute consistently, so that
they always get resolved to the actual operations.

Suggested-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Alexander Lobakin <alexandr.lobakin@intel.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Yury Norov <yury.norov@gmail.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Yury Norov <yury.norov@gmail.com>
alobakin authored and YuryNorov committed Jul 1, 2022
1 parent 21bb8af commit 0e86283
Showing 13 changed files with 229 additions and 150 deletions.
arch/alpha/include/asm/bitops.h (32 changes: 17 additions & 15 deletions)
@@ -46,8 +46,8 @@ set_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static inline void
-__set_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	int *m = ((int *) addr) + (nr >> 5);

@@ -82,8 +82,8 @@ clear_bit_unlock(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ void
-__clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	int *m = ((int *) addr) + (nr >> 5);

@@ -94,7 +94,7 @@ static inline void
 __clear_bit_unlock(unsigned long nr, volatile void * addr)
 {
 	smp_mb();
-	__clear_bit(nr, addr);
+	arch___clear_bit(nr, addr);
 }

 static inline void
Expand All @@ -118,8 +118,8 @@ change_bit(unsigned long nr, volatile void * addr)
/*
* WARNING: non atomic version.
*/
static __inline__ void
__change_bit(unsigned long nr, volatile void * addr)
static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
int *m = ((int *) addr) + (nr >> 5);

@@ -186,8 +186,8 @@ test_and_set_bit_lock(unsigned long nr, volatile void *addr)
 /*
  * WARNING: non atomic version.
  */
-static inline int
-__test_and_set_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
 	int *m = ((int *) addr) + (nr >> 5);
@@ -230,8 +230,8 @@ test_and_clear_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static inline int
-__test_and_clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
 	int *m = ((int *) addr) + (nr >> 5);
@@ -272,8 +272,8 @@ test_and_change_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ int
-__test_and_change_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
 	int *m = ((int *) addr) + (nr >> 5);
@@ -283,8 +283,8 @@ __test_and_change_bit(unsigned long nr, volatile void * addr)
 	return (old & mask) != 0;
 }

-static inline int
-test_bit(int nr, const volatile void * addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
 }
@@ -450,6 +450,8 @@ sched_find_first_bit(const unsigned long b[2])
 	return __ffs(tmp) + ofs;
 }

+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
 #include <asm-generic/bitops/le.h>

 #include <asm-generic/bitops/ext2-atomic-setbit.h>
arch/hexagon/include/asm/bitops.h (24 changes: 15 additions & 9 deletions)
@@ -127,38 +127,45 @@ static inline void change_bit(int nr, volatile void *addr)
  * be atomic, particularly for things like slab_lock and slab_unlock.
  *
  */
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	test_and_clear_bit(nr, addr);
 }

-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	test_and_set_bit(nr, addr);
 }

-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	test_and_change_bit(nr, addr);
 }

 /* Apparently, at least some of these are allowed to be non-atomic */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	return test_and_clear_bit(nr, addr);
 }

-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	return test_and_set_bit(nr, addr);
 }

-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	return test_and_change_bit(nr, addr);
 }

-static inline int __test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	int retval;

Expand All @@ -172,8 +179,6 @@ static inline int __test_bit(int nr, const volatile unsigned long *addr)
return retval;
}

#define test_bit(nr, addr) __test_bit(nr, addr)

/*
* ffz - find first zero in word.
* @word: The word to search
@@ -271,6 +276,7 @@ static inline unsigned long __fls(unsigned long word)
 }

 #include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>

 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
arch/ia64/include/asm/bitops.h (42 changes: 22 additions & 20 deletions)
@@ -53,16 +53,16 @@ set_bit (int nr, volatile void *addr)
 }

 /**
- * __set_bit - Set a bit in memory
+ * arch___set_bit - Set a bit in memory
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
  * Unlike set_bit(), this function is non-atomic and may be reordered.
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__set_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
 }
@@ -135,16 +135,16 @@ __clear_bit_unlock(int nr, void *addr)
 }

 /**
- * __clear_bit - Clears a bit in memory (non-atomic version)
+ * arch___clear_bit - Clears a bit in memory (non-atomic version)
  * @nr: the bit to clear
  * @addr: the address to start counting from
  *
  * Unlike clear_bit(), this function is non-atomic and may be reordered.
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__clear_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
 }
@@ -175,16 +175,16 @@ change_bit (int nr, volatile void *addr)
 }

 /**
- * __change_bit - Toggle a bit in memory
+ * arch___change_bit - Toggle a bit in memory
  * @nr: the bit to toggle
  * @addr: the address to start counting from
  *
  * Unlike change_bit(), this function is non-atomic and may be reordered.
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__change_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
 }
@@ -224,16 +224,16 @@ test_and_set_bit (int nr, volatile void *addr)
 #define test_and_set_bit_lock test_and_set_bit

 /**
- * __test_and_set_bit - Set a bit and return its old value
+ * arch___test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
  *
  * This operation is non-atomic and can be reordered.
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static __inline__ int
-__test_and_set_bit (int nr, volatile void *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__u32 *p = (__u32 *) addr + (nr >> 5);
 	__u32 m = 1 << (nr & 31);
@@ -269,16 +269,16 @@ test_and_clear_bit (int nr, volatile void *addr)
 }

 /**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * arch___test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
  * @addr: Address to count from
  *
  * This operation is non-atomic and can be reordered.
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static __inline__ int
-__test_and_clear_bit(int nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__u32 *p = (__u32 *) addr + (nr >> 5);
 	__u32 m = 1 << (nr & 31);
@@ -314,14 +314,14 @@ test_and_change_bit (int nr, volatile void *addr)
 }

 /**
- * __test_and_change_bit - Change a bit and return its old value
+ * arch___test_and_change_bit - Change a bit and return its old value
  * @nr: Bit to change
  * @addr: Address to count from
  *
  * This operation is non-atomic and can be reordered.
  */
-static __inline__ int
-__test_and_change_bit (int nr, void *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__u32 old, bit = (1 << (nr & 31));
 	__u32 *m = (__u32 *) addr + (nr >> 5);
@@ -331,8 +331,8 @@ __test_and_change_bit (int nr, void *addr)
 	return (old & bit) != 0;
 }

-static __inline__ int
-test_bit (int nr, const volatile void *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
 }
@@ -443,6 +443,8 @@ static __inline__ unsigned long __arch_hweight64(unsigned long x)

 #ifdef __KERNEL__

+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
 #include <asm-generic/bitops/le.h>

 #include <asm-generic/bitops/ext2-atomic-setbit.h>
[diffs for the remaining 10 changed files not shown]