From 0e862838f290147ea9c16db852d8d494b552d38d Mon Sep 17 00:00:00 2001
From: Alexander Lobakin
Date: Fri, 24 Jun 2022 14:13:07 +0200
Subject: bitops: unify non-atomic bitops prototypes across architectures

Currently, there is a mess with the prototypes of the non-atomic
bitops across the different architectures:

ret	bool, int, unsigned long
nr	int, long, unsigned int, unsigned long
addr	volatile unsigned long *, volatile void *

Thankfully, it doesn't provoke any bugs, but can sometimes make
the compiler angry when it's not handy at all.
Adjust all the prototypes to the following standard:

ret	bool				retval can be only 0 or 1
nr	unsigned long			native; signed makes no sense
addr	volatile unsigned long *	bitmaps are arrays of ulongs

Next, some architectures don't define 'arch_' versions as they don't
support instrumentation, others do. To make sure there is always the
same set of callables present and to ease any potential future
changes, make them all follow the rule:
 * architecture-specific files define only 'arch_' versions;
 * non-prefixed versions can be defined only in asm-generic files;
and place the non-prefixed definitions into a new file in asm-generic
to be included by non-instrumented architectures.

Finally, add some static assertions in order to prevent people from
making a mess in this room again.
I also used the %__always_inline attribute consistently, so that
they always get resolved to the actual operations.

Suggested-by: Andy Shevchenko
Signed-off-by: Alexander Lobakin
Acked-by: Mark Rutland
Reviewed-by: Yury Norov
Reviewed-by: Andy Shevchenko
Signed-off-by: Yury Norov
---
 arch/alpha/include/asm/bitops.h | 32 +++++++++++++++++---------------
 1 file changed, 17 insertions(+), 15 deletions(-)

(limited to 'arch/alpha')

diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h
index e1d8483a45f2..492c7713ddae 100644
--- a/arch/alpha/include/asm/bitops.h
+++ b/arch/alpha/include/asm/bitops.h
@@ -46,8 +46,8 @@ set_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static inline void
-__set_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	int *m = ((int *) addr) + (nr >> 5);
 
@@ -82,8 +82,8 @@ clear_bit_unlock(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ void
-__clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	int *m = ((int *) addr) + (nr >> 5);
 
@@ -94,7 +94,7 @@ static inline void
 __clear_bit_unlock(unsigned long nr, volatile void * addr)
 {
 	smp_mb();
-	__clear_bit(nr, addr);
+	arch___clear_bit(nr, addr);
 }
 
 static inline void
@@ -118,8 +118,8 @@ change_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ void
-__change_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	int *m = ((int *) addr) + (nr >> 5);
 
@@ -186,8 +186,8 @@ test_and_set_bit_lock(unsigned long nr, volatile void *addr)
 /*
  * WARNING: non atomic version.
  */
-static inline int
-__test_and_set_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
 	int *m = ((int *) addr) + (nr >> 5);
@@ -230,8 +230,8 @@ test_and_clear_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static inline int
-__test_and_clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
 	int *m = ((int *) addr) + (nr >> 5);
@@ -272,8 +272,8 @@ test_and_change_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ int
-__test_and_change_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
 	int *m = ((int *) addr) + (nr >> 5);
@@ -283,8 +283,8 @@ __test_and_change_bit(unsigned long nr, volatile void * addr)
 	return (old & mask) != 0;
 }
 
-static inline int
-test_bit(int nr, const volatile void * addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
 }
@@ -450,6 +450,8 @@ sched_find_first_bit(const unsigned long b[2])
 	return __ffs(tmp) + ofs;
 }
 
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
 #include <asm-generic/bitops/le.h>
 
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
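To see the convention outside of a diff, here is a small standalone C sketch
(not code from this series; the my___set_bit()/my_test_bit() names and the
reference prototypes are made up for illustration). It shows helpers that
follow the prototype standard from the commit message (bool return value,
unsigned long nr, volatile unsigned long *addr) and the kind of static
assertion that turns a future deviation into a build error:

/*
 * Standalone sketch, not taken from this series: my___set_bit() and
 * my_test_bit() are hypothetical helpers that follow the agreed-upon
 * prototypes.  Build with GCC or Clang (typeof and
 * __builtin_types_compatible_p are compiler extensions).
 */
#include <assert.h>	/* static_assert() (C11) */
#include <stdbool.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static inline void my___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static inline bool my_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

/* Break the build if a helper drifts away from the standard prototype. */
static_assert(__builtin_types_compatible_p(typeof(my___set_bit),
					   void(unsigned long, volatile unsigned long *)),
	      "__set_bit-style helpers must take (unsigned long, volatile unsigned long *)");
static_assert(__builtin_types_compatible_p(typeof(my_test_bit),
					   bool(unsigned long, const volatile unsigned long *)),
	      "test_bit-style helpers must return bool");

The assertions the commit message refers to are not part of the arch/alpha
hunks above (the view is limited to 'arch/alpha'); the sketch only
illustrates the mechanism.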