Commit 4e23eeeb authored by Linus Torvalds

Merge tag 'bitmap-6.0-rc1' of https://github.com/norov/linux

Pull bitmap updates from Yury Norov:

 - fix the duplicated comments on bitmap_to_arr64() (Qu Wenruo)

 - optimize out non-atomic bitops on compile-time constants (Alexander
   Lobakin); a short sketch of the dispatch idea follows the commit list below

 - cleanup bitmap-related headers (Yury Norov)

 - x86/olpc: fix 'logical not is only applied to the left hand side'
   (Alexander Lobakin)

 - lib/nodemask: inline wrappers around bitmap (Yury Norov)

* tag 'bitmap-6.0-rc1' of https://github.com/norov/linux: (26 commits)
  lib/nodemask: inline next_node_in() and node_random()
  powerpc: drop dependency on <asm/machdep.h> in archrandom.h
  x86/olpc: fix 'logical not is only applied to the left hand side'
  lib/cpumask: move some one-line wrappers to header file
  headers/deps: mm: align MANITAINERS and Docs with new gfp.h structure
  headers/deps: mm: Split <linux/gfp_types.h> out of <linux/gfp.h>
  headers/deps: mm: Optimize <linux/gfp.h> header dependencies
  lib/cpumask: move trivial wrappers around find_bit to the header
  lib/cpumask: change return types to unsigned where appropriate
  cpumask: change return types to bool where appropriate
  lib/bitmap: change type of bitmap_weight to unsigned long
  lib/bitmap: change return types to bool where appropriate
  arm: align find_bit declarations with generic kernel
  iommu/vt-d: avoid invalid memory access via node_online(NUMA_NO_NODE)
  lib/test_bitmap: test the tail after bitmap_to_arr64()
  lib/bitmap: fix off-by-one in bitmap_to_arr64()
  lib: test_bitmap: add compile-time optimization/evaluations assertions
  bitmap: don't assume compiler evaluates small mem*() builtins calls
  net/ice: fix initializing the bitmap in the switch code
  bitops: let optimize out non-atomic bitops on compile-time constants
  ...
parents 3bc1bc0b 36d4b36b
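The "optimize out non-atomic bitops on compile-time constants" item above hinges on routing each bitop through a wrapper that picks a const-foldable variant when its arguments are known at compile time. A minimal standalone sketch of that dispatch idea follows; the const_test_bit/_test_bit/test_bit names mirror the series' convention, but the macro body and the demo around it are illustrative rather than the actual <linux/bitops.h> wrapper (which also checks that the bitmap contents are constant), and it assumes a GNU-compatible compiler for __builtin_constant_p().

/*
 * Hedged sketch: dispatch a bit test to a compile-time variant when the
 * bit number is a constant expression. Standalone userspace demo.
 */
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))

/* non-volatile read: the compiler may fold the whole call away */
static inline bool const_test_bit(unsigned long nr, const unsigned long *addr)
{
	return (addr[BIT_WORD(nr)] & BIT_MASK(nr)) != 0;
}

/* volatile read: always performed at run time */
static inline bool _test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return (addr[BIT_WORD(nr)] & BIT_MASK(nr)) != 0;
}

/* pick the foldable variant only when the bit number is a constant */
#define test_bit(nr, addr)						\
	(__builtin_constant_p(nr) ? const_test_bit((nr), (addr))	\
				  : _test_bit((nr), (addr)))

int main(void)
{
	unsigned long map[2] = { 0x5UL, 0x0UL };	/* bits 0 and 2 set */
	unsigned long runtime_nr = 2;

	printf("bit 0: %d\n", test_bit(0, map));		/* constant nr */
	printf("bit %lu: %d\n", runtime_nr, test_bit(runtime_nr, map));
	return 0;
}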
@@ -22,16 +22,16 @@ Memory Allocation Controls
 .. kernel-doc:: include/linux/gfp.h
    :internal:
-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
    :doc: Page mobility and placement hints
-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
    :doc: Watermark modifiers
-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
    :doc: Reclaim modifiers
-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
    :doc: Useful GFP flag combinations
 The Slab Cache
......
@@ -3603,7 +3603,6 @@ F: lib/bitmap.c
 F: lib/cpumask.c
 F: lib/find_bit.c
 F: lib/find_bit_benchmark.c
-F: lib/nodemask.c
 F: lib/test_bitmap.c
 F: tools/include/linux/bitmap.h
 F: tools/include/linux/find.h
@@ -13136,6 +13135,7 @@ W: http://www.linux-mm.org
 T: git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
 T: quilt git://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new
 F: include/linux/gfp.h
+F: include/linux/gfp_types.h
 F: include/linux/memory_hotplug.h
 F: include/linux/mm.h
 F: include/linux/mmzone.h
......
@@ -46,8 +46,8 @@ set_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static inline void
-__set_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	int *m = ((int *) addr) + (nr >> 5);
@@ -82,8 +82,8 @@ clear_bit_unlock(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ void
-__clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	int *m = ((int *) addr) + (nr >> 5);
@@ -94,7 +94,7 @@ static inline void
 __clear_bit_unlock(unsigned long nr, volatile void * addr)
 {
 	smp_mb();
-	__clear_bit(nr, addr);
+	arch___clear_bit(nr, addr);
 }
 static inline void
@@ -118,8 +118,8 @@ change_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ void
-__change_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	int *m = ((int *) addr) + (nr >> 5);
@@ -186,8 +186,8 @@ test_and_set_bit_lock(unsigned long nr, volatile void *addr)
 /*
  * WARNING: non atomic version.
  */
-static inline int
-__test_and_set_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
 	int *m = ((int *) addr) + (nr >> 5);
@@ -230,8 +230,8 @@ test_and_clear_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static inline int
-__test_and_clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
 	int *m = ((int *) addr) + (nr >> 5);
@@ -272,8 +272,8 @@ test_and_change_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ int
-__test_and_change_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
 	int *m = ((int *) addr) + (nr >> 5);
@@ -283,8 +283,8 @@ __test_and_change_bit(unsigned long nr, volatile void * addr)
 	return (old & mask) != 0;
 }
-static inline int
-test_bit(int nr, const volatile void * addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
 }
@@ -450,6 +450,8 @@ sched_find_first_bit(const unsigned long b[2])
 	return __ffs(tmp) + ofs;
 }
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
 #include <asm-generic/bitops/le.h>
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
......
@@ -160,18 +160,20 @@ extern int _test_and_change_bit(int nr, volatile unsigned long * p);
 /*
  * Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
  */
-extern int _find_first_zero_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_zero_bit_le(const unsigned long *p, int size, int offset);
-extern int _find_first_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_le(const unsigned long *p,
+		unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_le(const unsigned long *p, unsigned long size, unsigned long offset);
 /*
  * Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
  */
-extern int _find_first_zero_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_zero_bit_be(const unsigned long *p, int size, int offset);
-extern int _find_first_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_be(const unsigned long *p,
+		unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_be(const unsigned long *p, unsigned long size, unsigned long offset);
 #ifndef CONFIG_SMP
 /*
......
@@ -127,38 +127,45 @@ static inline void change_bit(int nr, volatile void *addr)
  * be atomic, particularly for things like slab_lock and slab_unlock.
  *
  */
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	test_and_clear_bit(nr, addr);
 }
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	test_and_set_bit(nr, addr);
 }
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	test_and_change_bit(nr, addr);
 }
 /* Apparently, at least some of these are allowed to be non-atomic */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	return test_and_clear_bit(nr, addr);
 }
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	return test_and_set_bit(nr, addr);
 }
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	return test_and_change_bit(nr, addr);
 }
-static inline int __test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	int retval;
@@ -172,8 +179,6 @@ static inline int __test_bit(int nr, const volatile unsigned long *addr)
 	return retval;
 }
-#define test_bit(nr, addr) __test_bit(nr, addr)
 /*
  * ffz - find first zero in word.
  * @word: The word to search
@@ -271,6 +276,7 @@ static inline unsigned long __fls(unsigned long word)
 }
 #include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
......
@@ -53,7 +53,7 @@ set_bit (int nr, volatile void *addr)
 }
 /**
- * __set_bit - Set a bit in memory
+ * arch___set_bit - Set a bit in memory
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
@@ -61,8 +61,8 @@ set_bit (int nr, volatile void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__set_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
 }
@@ -135,7 +135,7 @@ __clear_bit_unlock(int nr, void *addr)
 }
 /**
- * __clear_bit - Clears a bit in memory (non-atomic version)
+ * arch___clear_bit - Clears a bit in memory (non-atomic version)
  * @nr: the bit to clear
  * @addr: the address to start counting from
  *
@@ -143,8 +143,8 @@ __clear_bit_unlock(int nr, void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__clear_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
 }
@@ -175,7 +175,7 @@ change_bit (int nr, volatile void *addr)
 }
 /**
- * __change_bit - Toggle a bit in memory
+ * arch___change_bit - Toggle a bit in memory
  * @nr: the bit to toggle
  * @addr: the address to start counting from
  *
@@ -183,8 +183,8 @@ change_bit (int nr, volatile void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__change_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
 }
@@ -224,7 +224,7 @@ test_and_set_bit (int nr, volatile void *addr)
 #define test_and_set_bit_lock test_and_set_bit
 /**
- * __test_and_set_bit - Set a bit and return its old value
+ * arch___test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
  *
@@ -232,8 +232,8 @@ test_and_set_bit (int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
-static __inline__ int
-__test_and_set_bit (int nr, volatile void *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__u32 *p = (__u32 *) addr + (nr >> 5);
 	__u32 m = 1 << (nr & 31);
@@ -269,7 +269,7 @@ test_and_clear_bit (int nr, volatile void *addr)
 }
 /**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * arch___test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
  * @addr: Address to count from
  *
@@ -277,8 +277,8 @@ test_and_clear_bit (int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
-static __inline__ int
-__test_and_clear_bit(int nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__u32 *p = (__u32 *) addr + (nr >> 5);
 	__u32 m = 1 << (nr & 31);
@@ -314,14 +314,14 @@ test_and_change_bit (int nr, volatile void *addr)
 }
 /**
- * __test_and_change_bit - Change a bit and return its old value
+ * arch___test_and_change_bit - Change a bit and return its old value
  * @nr: Bit to change
  * @addr: Address to count from
  *
  * This operation is non-atomic and can be reordered.
  */
-static __inline__ int
-__test_and_change_bit (int nr, void *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__u32 old, bit = (1 << (nr & 31));
 	__u32 *m = (__u32 *) addr + (nr >> 5);
@@ -331,8 +331,8 @@ __test_and_change_bit (int nr, void *addr)
 	return (old & bit) != 0;
 }
-static __inline__ int
-test_bit (int nr, const volatile void *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
 }
@@ -443,6 +443,8 @@ static __inline__ unsigned long __arch_hweight64(unsigned long x)
 #ifdef __KERNEL__
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
 #include <asm-generic/bitops/le.h>
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
......
@@ -538,7 +538,7 @@ ia64_get_irr(unsigned int vector)
 {
 	unsigned int reg = vector / 64;
 	unsigned int bit = vector % 64;
-	u64 irr;
+	unsigned long irr;
 	switch (reg) {
 	case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
......
@@ -65,8 +65,11 @@ static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
 				bfset_mem_set_bit(nr, vaddr))
 #endif
-#define __set_bit(nr, vaddr) set_bit(nr, vaddr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	set_bit(nr, addr);
+}
 static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
 {
@@ -105,8 +108,11 @@ static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
 				bfclr_mem_clear_bit(nr, vaddr))
 #endif
-#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	clear_bit(nr, addr);
+}
 static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
 {
@@ -145,14 +151,17 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
 				bfchg_mem_change_bit(nr, vaddr))
 #endif
-#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
-static inline int test_bit(int nr, const volatile unsigned long *vaddr)
-{
-	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
-}
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	change_bit(nr, addr);
+}
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+	return (addr[nr >> 5] & (1UL << (nr & 31))) != 0;
+}
 static inline int bset_reg_test_and_set_bit(int nr,
 					    volatile unsigned long *vaddr)
@@ -201,8 +210,11 @@ static inline int bfset_mem_test_and_set_bit(int nr,
 			  bfset_mem_test_and_set_bit(nr, vaddr))
 #endif
-#define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	return test_and_set_bit(nr, addr);
+}
 static inline int bclr_reg_test_and_clear_bit(int nr,
 					      volatile unsigned long *vaddr)
@@ -251,8 +263,11 @@ static inline int bfclr_mem_test_and_clear_bit(int nr,
 			  bfclr_mem_test_and_clear_bit(nr, vaddr))
 #endif
-#define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	return test_and_clear_bit(nr, addr);
+}
 static inline int bchg_reg_test_and_change_bit(int nr,
 					       volatile unsigned long *vaddr)
@@ -301,8 +316,11 @@ static inline int bfchg_mem_test_and_change_bit(int nr,
 			 bfchg_mem_test_and_change_bit(nr, vaddr))
 #endif
-#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	return test_and_change_bit(nr, addr);
+}
 /*
  * The true 68020 and more advanced processors support the "bfffo"
@@ -522,6 +540,7 @@ static inline unsigned long __fls(unsigned long x)
 #define clear_bit_unlock clear_bit
 #define __clear_bit_unlock clear_bit_unlock
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
 #include <asm-generic/bitops/ext2-atomic.h>
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
......
@@ -2,19 +2,12 @@
 #ifndef _ASM_POWERPC_ARCHRANDOM_H
 #define _ASM_POWERPC_ARCHRANDOM_H
-#include <asm/machdep.h>
 static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
 {
 	return 0;
 }
-static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
-{
-	if (max_longs && ppc_md.get_random_seed && ppc_md.get_random_seed(v))
-		return 1;
-	return 0;
-}
+size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs);
 #ifdef CONFIG_PPC_POWERNV
 int pnv_get_random_long(unsigned long *v);
......
@@ -171,6 +171,14 @@ EXPORT_SYMBOL_GPL(machine_power_off);
 void (*pm_power_off)(void);
 EXPORT_SYMBOL_GPL(pm_power_off);
+size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
+{
+	if (max_longs && ppc_md.get_random_seed && ppc_md.get_random_seed(v))
+		return 1;
+	return 0;
+}
+EXPORT_SYMBOL(arch_get_random_seed_longs);
 void machine_halt(void)
 {
 	machine_shutdown();
......
@@ -113,75 +113,76 @@ static inline bool arch_test_and_change_bit(unsigned long nr,
 	return old & mask;
 }
-static inline void arch___set_bit(unsigned long nr, volatile unsigned long *ptr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);
-	*addr |= mask;
+	*p |= mask;
 }
-static inline void arch___clear_bit(unsigned long nr,
-				    volatile unsigned long *ptr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);
-	*addr &= ~mask;
+	*p &= ~mask;
 }
-static inline void arch___change_bit(unsigned long nr,
-				     volatile unsigned long *ptr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);
-	*addr ^= mask;
+	*p ^= mask;
 }
-static inline bool arch___test_and_set_bit(unsigned long nr,
-					   volatile unsigned long *ptr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);
 	unsigned long old;
-	old = *addr;
-	*addr |= mask;
+	old = *p;
+	*p |= mask;
 	return old & mask;
 }
-static inline bool arch___test_and_clear_bit(unsigned long nr,
-					     volatile unsigned long *ptr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);
 	unsigned long old;
-	old = *addr;
-	*addr &= ~mask;
+	old = *p;
+	*p &= ~mask;
 	return old & mask;
 }
-static inline bool arch___test_and_change_bit(unsigned long nr,
-					      volatile unsigned long *ptr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);
 	unsigned long old;
-	old = *addr;
-	*addr ^= mask;
+	old = *p;
+	*p ^= mask;
 	return old & mask;
 }
-static inline bool arch_test_bit(unsigned long nr,
-				 const volatile unsigned long *ptr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
-	const volatile unsigned long *addr = __bitops_word(nr, ptr);
+	const volatile unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);
-	return *addr & mask;
+	return *p & mask;
 }
 static inline bool arch_test_and_set_bit_lock(unsigned long nr,
......
@@ -2,6 +2,8 @@
 #ifndef __ASM_SH_BITOPS_OP32_H
 #define __ASM_SH_BITOPS_OP32_H
+#include <linux/bits.h>
 /*
  * The bit modifying instructions on SH-2A are only capable of working
  * with a 3-bit immediate, which signifies the shift position for the bit
@@ -16,7 +18,8 @@
 #define BYTE_OFFSET(nr) ((nr) % BITS_PER_BYTE)
 #endif
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	if (__builtin_constant_p(nr)) {
 		__asm__ __volatile__ (
@@ -33,7 +36,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
 	}
 }
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	if (__builtin_constant_p(nr)) {
 		__asm__ __volatile__ (
@@ -52,7 +56,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
 }
 /**
- * __change_bit - Toggle a bit in memory
+ * arch___change_bit - Toggle a bit in memory
  * @nr: the bit to change
  * @addr: the address to start counting from
  *
@@ -60,7 +64,8 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	if (__builtin_constant_p(nr)) {
 		__asm__ __volatile__ (
@@ -79,7 +84,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
 }
 /**
- * __test_and_set_bit - Set a bit and return its old value
+ * arch___test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
  *
@@ -87,7 +92,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -98,7 +104,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 }
 /**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * arch___test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
  * @addr: Address to count from
  *
@@ -106,7 +112,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -117,8 +124,8 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 }
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
-					    volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -129,13 +136,16 @@ static inline int __test_and_change_bit(int nr,
 }
 /**
- * test_bit - Determine whether a bit is set
+ * arch_test_bit - Determine whether a bit is set
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-static inline int test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
 #endif /* __ASM_SH_BITOPS_OP32_H */
@@ -19,9 +19,9 @@
 #error only <linux/bitops.h> can be included directly
 #endif
-unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
-unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
-unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
+unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask);
+unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask);
+unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask);
 /*
  * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
@@ -36,7 +36,7 @@ static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *add
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
-	return ___set_bit(ADDR, mask) != 0;
+	return sp32___set_bit(ADDR, mask) != 0;
 }
 static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
@@ -46,7 +46,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
-	(void) ___set_bit(ADDR, mask);
+	(void) sp32___set_bit(ADDR, mask);
 }
 static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
@@ -56,7 +56,7 @@ static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *a
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
-	return ___clear_bit(ADDR, mask) != 0;
+	return sp32___clear_bit(ADDR, mask) != 0;
 }
 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
@@ -66,7 +66,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
-	(void) ___clear_bit(ADDR, mask);
+	(void) sp32___clear_bit(ADDR, mask);
 }
 static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
@@ -76,7 +76,7 @@ static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
-	return ___change_bit(ADDR, mask) != 0;
+	return sp32___change_bit(ADDR, mask) != 0;
 }
 static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
@@ -86,7 +86,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
-	(void) ___change_bit(ADDR, mask);
+	(void) sp32___change_bit(ADDR, mask);
 }
 #include <asm-generic/bitops/non-atomic.h>
......
@@ -120,7 +120,7 @@ void arch_atomic_set(atomic_t *v, int i)
 }
 EXPORT_SYMBOL(arch_atomic_set);
-unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
+unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask)
 {
 	unsigned long old, flags;
@@ -131,9 +131,9 @@ unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
 	return old & mask;
 }
-EXPORT_SYMBOL(___set_bit);
+EXPORT_SYMBOL(sp32___set_bit);
-unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
+unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask)
 {
 	unsigned long old, flags;
@@ -144,9 +144,9 @@ unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
 	return old & mask;
 }
-EXPORT_SYMBOL(___clear_bit);
+EXPORT_SYMBOL(sp32___clear_bit);
-unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
+unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask)
 {
 	unsigned long old, flags;
@@ -157,7 +157,7 @@ unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
 	return old & mask;
 }
-EXPORT_SYMBOL(___change_bit);
+EXPORT_SYMBOL(sp32___change_bit);
 unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
 {
......
@@ -63,7 +63,7 @@ arch_set_bit(long nr, volatile unsigned long *addr)
 }
 static __always_inline void
-arch___set_bit(long nr, volatile unsigned long *addr)
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
@@ -89,7 +89,7 @@ arch_clear_bit_unlock(long nr, volatile unsigned long *addr)
 }
 static __always_inline void
-arch___clear_bit(long nr, volatile unsigned long *addr)
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
@@ -114,7 +114,7 @@ arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
 }
 static __always_inline void
-arch___change_bit(long nr, volatile unsigned long *addr)
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
@@ -145,7 +145,7 @@ arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 }
 static __always_inline bool
-arch___test_and_set_bit(long nr, volatile unsigned long *addr)
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	bool oldbit;
@@ -171,7 +171,7 @@ arch_test_and_clear_bit(long nr, volatile unsigned long *addr)
  * this without also updating arch/x86/kernel/kvm.c
  */
 static __always_inline bool
-arch___test_and_clear_bit(long nr, volatile unsigned long *addr)
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	bool oldbit;
@@ -183,7 +183,7 @@ arch___test_and_clear_bit(long nr, volatile unsigned long *addr)
 }
 static __always_inline bool
-arch___test_and_change_bit(long nr, volatile unsigned long *addr)
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	bool oldbit;
@@ -219,10 +219,12 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
 	return oldbit;
 }
-#define arch_test_bit(nr, addr) \
-	(__builtin_constant_p((nr)) \
-	 ? constant_test_bit((nr), (addr)) \
-	 : variable_test_bit((nr), (addr)))
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+	return __builtin_constant_p(nr) ? constant_test_bit(nr, addr) :
+					  variable_test_bit(nr, addr);
+}
 /**
  * __ffs - find first set bit in word
......
@@ -80,7 +80,7 @@ static void send_ebook_state(void)
 		return;
 	}
-	if (!!test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == state)
+	if (test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == !!state)
 		return; /* Nothing new to report. */
 	input_report_switch(ebook_switch_idev, SW_TABLET_MODE, state);
......
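The one-line x86/olpc change above is easy to misread, so a hedged, standalone sketch of the compiler warning it silences may help: `!!` binds only to the left-hand operand, so a 0/1 value ends up compared against a raw integer. The values and names below are invented for the demo; only the shape of the comparison comes from the hunk above.

/*
 * Hedged sketch of the precedence issue: normalizing only the left side
 * of the comparison. Standalone userspace demo with made-up values.
 */
#include <stdbool.h>
#include <stdio.h>

static bool bit_is_set = true;	/* stands in for test_bit(SW_TABLET_MODE, ...) */

int main(void)
{
	int state = 2;	/* hypothetical non-zero, non-1 value */

	/* old check: (!!true) == 2 is false, so the unchanged state is re-reported */
	if (!!bit_is_set == state)
		printf("old check: nothing new to report\n");
	else
		printf("old check: reports a spurious change\n");

	/* fixed check: both sides are normalized to 0/1 before comparing */
	if (bit_is_set == !!state)
		printf("fixed check: nothing new to report\n");

	return 0;
}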
@@ -494,7 +494,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
 		if (drhd->reg_base_addr == rhsa->base_address) {
 			int node = pxm_to_node(rhsa->proximity_domain);
-			if (!node_online(node))
+			if (node != NUMA_NO_NODE && !node_online(node))
 				node = NUMA_NO_NODE;
 			drhd->iommu->node = node;
 			return 0;
......
@@ -4971,7 +4971,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
 	bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
 	bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
-	bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
+	bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);
 	/* For each profile we are going to associate the recipe with, add the
 	 * recipes that are associated with that profile. This will give us
......
@@ -463,7 +463,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 		field = min(
 			bitmap_weight(actv_ports.ports, dev->caps.num_ports),
-			dev->caps.num_ports);
+			(unsigned int) dev->caps.num_ports);
 		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
 		size = dev->caps.function_caps; /* set PF behaviours */
......
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
#define __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
#include <linux/bits.h>
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
/*
* Generic definitions for bit operations, should not be used in regular code
* directly.
*/
/**
* generic___set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* Unlike set_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static __always_inline void
generic___set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p |= mask;
}
static __always_inline void
generic___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p &= ~mask;
}
/**
* generic___change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
* Unlike change_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static __always_inline void
generic___change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p ^= mask;
}
/**
* generic___test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static __always_inline bool
generic___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old | mask;
return (old & mask) != 0;
}
/**
* generic___test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static __always_inline bool
generic___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old & ~mask;
return (old & mask) != 0;
}
/* WARNING: non atomic and it can be reordered! */
static __always_inline bool
generic___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old ^ mask;
return (old & mask) != 0;
}
/**
* generic_test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
static __always_inline bool
generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
/*
* Unlike the bitops with the '__' prefix above, this one *is* atomic,
* so `volatile` must always stay here with no cast-aways. See
* `Documentation/atomic_bitops.txt` for the details.
*/
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
/*
* const_*() definitions provide good compile-time optimizations when
* the passed arguments can be resolved at compile time.
*/
#define const___set_bit generic___set_bit
#define const___clear_bit generic___clear_bit
#define const___change_bit generic___change_bit
#define const___test_and_set_bit generic___test_and_set_bit
#define const___test_and_clear_bit generic___test_and_clear_bit
#define const___test_and_change_bit generic___test_and_change_bit
/**
* const_test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*
* A version of generic_test_bit() which discards the `volatile` qualifier to
* allow a compiler to optimize code harder. Non-atomic and to be called only
* for testing compile-time constants, e.g. by the corresponding macros, not
* directly from "regular" code.
*/
static __always_inline bool
const_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
const unsigned long *p = (const unsigned long *)addr + BIT_WORD(nr);
unsigned long mask = BIT_MASK(nr);
unsigned long val = *p;
return !!(val & mask);
}
#endif /* __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H */
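One practical consequence of the const_*() aliases just defined: when both the bitmap contents and the bit number are known at build time, the compiler is free to evaluate the test and drop the memory access entirely, which is what the new compile-time assertions added to lib/test_bitmap.c in this series rely on. A hedged, standalone illustration follows; only BIT_MASK/BIT_WORD and the shape of const_test_bit() are taken from the file above, while frozen_map and main() are invented for the demo.

/*
 * Hedged illustration: const_test_bit() drops the `volatile` qualifier, so
 * with optimization enabled this call can fold to a constant. Standalone
 * userspace demo.
 */
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))

static const unsigned long frozen_map[1] = { 0x9UL };	/* bits 0 and 3 set */

static inline bool const_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	const unsigned long *p = (const unsigned long *)addr + BIT_WORD(nr);
	unsigned long mask = BIT_MASK(nr);
	unsigned long val = *p;		/* plain, non-volatile load */

	return !!(val & mask);
}

int main(void)
{
	/* Both arguments are compile-time constants, so an optimizing build
	 * can typically fold each call to a fixed 0 or 1. */
	printf("bit 3 set: %d\n", const_test_bit(3, frozen_map));
	printf("bit 2 set: %d\n", const_test_bit(2, frozen_map));
	return 0;
}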
@@ -14,7 +14,7 @@
 #include <linux/instrumented.h>
 /**
- * __set_bit - Set a bit in memory
+ * ___set_bit - Set a bit in memory
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
@@ -22,14 +22,15 @@
  * region of memory concurrently, the effect may be that only one operation
  * succeeds.
  */
-static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	instrument_write(addr + BIT_WORD(nr), sizeof(long));
 	arch___set_bit(nr, addr);
 }
 /**
- * __clear_bit - Clears a bit in memory
+ * ___clear_bit - Clears a bit in memory
  * @nr: the bit to clear
  * @addr: the address to start counting from
  *
@@ -37,14 +38,15 @@ static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
  * region of memory concurrently, the effect may be that only one operation
  * succeeds.
  */
-static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	instrument_write(addr + BIT_WORD(nr), sizeof(long));
 	arch___clear_bit(nr, addr);
 }
 /**
- * __change_bit - Toggle a bit in memory
+ * ___change_bit - Toggle a bit in memory
  * @nr: the bit to change
  * @addr: the address to start counting from
  *
@@ -52,7 +54,8 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
  * region of memory concurrently, the effect may be that only one operation
  * succeeds.
  */
-static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	instrument_write(addr + BIT_WORD(nr), sizeof(long));
 	arch___change_bit(nr, addr);
@@ -83,53 +86,57 @@ static __always_inline void __instrument_read_write_bitop(long nr, volatile unsi
 }
 /**
- * __test_and_set_bit - Set a bit and return its old value
+ * ___test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
  *
  * This operation is non-atomic. If two instances of this operation race, one
  * can appear to succeed but actually fail.
  */
-static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__instrument_read_write_bitop(nr, addr);
 	return arch___test_and_set_bit(nr, addr);
 }
 /**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * ___test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
  * @addr: Address to count from
  *
 * This operation is non-atomic. If two instances of this operation race, one
 * can appear to succeed but actually fail.
 */
-static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__instrument_read_write_bitop(nr, addr);
 	return arch___test_and_clear_bit(nr, addr);
 }
 /**
- * __test_and_change_bit - Change a bit and return its old value
+ * ___test_and_change_bit - Change a bit and return its old value
  * @nr: Bit to change
  * @addr: Address to count from
  *
 * This operation is non-atomic. If two instances of this operation race, one
 * can appear to succeed but actually fail.
 */
-static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__instrument_read_write_bitop(nr, addr);
 	return arch___test_and_change_bit(nr, addr);
 }
 /**
- * test_bit - Determine whether a bit is set
+ * _test_bit - Determine whether a bit is set
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-static __always_inline bool test_bit(long nr, const volatile unsigned long *addr)
+static __always_inline bool
+_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_bit(nr, addr);
......
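For illustration, the "two instances racing" caveat in the comments above is why these non-atomic forms are only safe under an external lock; a minimal sketch (the lock and bitmap names are made up, not part of this series):

	static DEFINE_SPINLOCK(my_lock);
	static DECLARE_BITMAP(my_bitmap, 64);

	spin_lock(&my_lock);
	if (!__test_and_set_bit(3, my_bitmap))	/* old value is returned non-atomically */
		pr_debug("claimed bit 3 first\n");
	spin_unlock(&my_lock);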
...@@ -2,121 +2,18 @@ ...@@ -2,121 +2,18 @@
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ #ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#include <asm/types.h> #include <asm-generic/bitops/generic-non-atomic.h>
/** #define arch___set_bit generic___set_bit
* arch___set_bit - Set a bit in memory #define arch___clear_bit generic___clear_bit
* @nr: the bit to set #define arch___change_bit generic___change_bit
* @addr: the address to start counting from
*
* Unlike set_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static __always_inline void
arch___set_bit(unsigned int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p |= mask; #define arch___test_and_set_bit generic___test_and_set_bit
} #define arch___test_and_clear_bit generic___test_and_clear_bit
#define __set_bit arch___set_bit #define arch___test_and_change_bit generic___test_and_change_bit
static __always_inline void #define arch_test_bit generic_test_bit
arch___clear_bit(unsigned int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p &= ~mask; #include <asm-generic/bitops/non-instrumented-non-atomic.h>
}
#define __clear_bit arch___clear_bit
/**
* arch___change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
* Unlike change_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static __always_inline
void arch___change_bit(unsigned int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p ^= mask;
}
#define __change_bit arch___change_bit
/**
* arch___test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static __always_inline int
arch___test_and_set_bit(unsigned int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old | mask;
return (old & mask) != 0;
}
#define __test_and_set_bit arch___test_and_set_bit
/**
* arch___test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static __always_inline int
arch___test_and_clear_bit(unsigned int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old & ~mask;
return (old & mask) != 0;
}
#define __test_and_clear_bit arch___test_and_clear_bit
/* WARNING: non atomic and it can be reordered! */
static __always_inline int
arch___test_and_change_bit(unsigned int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old ^ mask;
return (old & mask) != 0;
}
#define __test_and_change_bit arch___test_and_change_bit
/**
* arch_test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
static __always_inline int
arch_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
#define test_bit arch_test_bit
#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */ #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
#define __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
#define ___set_bit arch___set_bit
#define ___clear_bit arch___clear_bit
#define ___change_bit arch___change_bit
#define ___test_and_set_bit arch___test_and_set_bit
#define ___test_and_clear_bit arch___test_and_clear_bit
#define ___test_and_change_bit arch___test_and_change_bit
#define _test_bit arch_test_bit
#endif /* __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H */
...@@ -71,9 +71,9 @@ struct device; ...@@ -71,9 +71,9 @@ struct device;
* bitmap_release_region(bitmap, pos, order) Free specified bit region * bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
* bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst * bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst
* bitmap_from_arr64(dst, buf, nbits) Copy nbits from u64[] buf to dst
* bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst * bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst
* bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst * bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst
* bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst
* bitmap_get_value8(map, start) Get 8bit value from map at start * bitmap_get_value8(map, start) Get 8bit value from map at start
* bitmap_set_value8(map, value, start) Set 8bit value to map at start * bitmap_set_value8(map, value, start) Set 8bit value to map at start
* *
...@@ -148,13 +148,13 @@ void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, ...@@ -148,13 +148,13 @@ void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits); unsigned int shift, unsigned int nbits);
void bitmap_cut(unsigned long *dst, const unsigned long *src, void bitmap_cut(unsigned long *dst, const unsigned long *src,
unsigned int first, unsigned int cut, unsigned int nbits); unsigned int first, unsigned int cut, unsigned int nbits);
int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits); const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits); const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits); const unsigned long *bitmap2, unsigned int nbits);
int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits); const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_replace(unsigned long *dst, void __bitmap_replace(unsigned long *dst,
const unsigned long *old, const unsigned long *new, const unsigned long *old, const unsigned long *new,
...@@ -163,7 +163,7 @@ bool __bitmap_intersects(const unsigned long *bitmap1, ...@@ -163,7 +163,7 @@ bool __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits); const unsigned long *bitmap2, unsigned int nbits);
bool __bitmap_subset(const unsigned long *bitmap1, bool __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits); const unsigned long *bitmap2, unsigned int nbits);
int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
void __bitmap_set(unsigned long *map, unsigned int start, int len); void __bitmap_set(unsigned long *map, unsigned int start, int len);
void __bitmap_clear(unsigned long *map, unsigned int start, int len); void __bitmap_clear(unsigned long *map, unsigned int start, int len);
...@@ -238,12 +238,20 @@ extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp, ...@@ -238,12 +238,20 @@ extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{ {
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
if (small_const_nbits(nbits))
*dst = 0;
else
memset(dst, 0, len); memset(dst, 0, len);
} }
static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{ {
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
if (small_const_nbits(nbits))
*dst = ~0UL;
else
memset(dst, 0xff, len); memset(dst, 0xff, len);
} }
...@@ -251,6 +259,10 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, ...@@ -251,6 +259,10 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
unsigned int nbits) unsigned int nbits)
{ {
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
if (small_const_nbits(nbits))
*dst = *src;
else
memcpy(dst, src, len); memcpy(dst, src, len);
} }
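As a sketch of what the small_const_nbits() branches above buy (bitmap names are illustrative): when nbits is a compile-time constant no larger than BITS_PER_LONG, these helpers collapse to single word assignments instead of mem*() calls that the compiler may not fold:

	DECLARE_BITMAP(src, 16);
	DECLARE_BITMAP(dst, 16);

	bitmap_fill(src, 16);		/* becomes *src = ~0UL */
	bitmap_zero(dst, 16);		/* becomes *dst = 0    */
	bitmap_copy(dst, src, 16);	/* becomes *dst = *src */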
...@@ -303,7 +315,7 @@ void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits); ...@@ -303,7 +315,7 @@ void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits)) bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits))
#endif #endif
static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits) const unsigned long *src2, unsigned int nbits)
{ {
if (small_const_nbits(nbits)) if (small_const_nbits(nbits))
...@@ -329,7 +341,7 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, ...@@ -329,7 +341,7 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
__bitmap_xor(dst, src1, src2, nbits); __bitmap_xor(dst, src1, src2, nbits);
} }
static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1, static inline bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits) const unsigned long *src2, unsigned int nbits)
{ {
if (small_const_nbits(nbits)) if (small_const_nbits(nbits))
...@@ -419,7 +431,8 @@ static inline bool bitmap_full(const unsigned long *src, unsigned int nbits) ...@@ -419,7 +431,8 @@ static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
return find_first_zero_bit(src, nbits) == nbits; return find_first_zero_bit(src, nbits) == nbits;
} }
static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits) static __always_inline
unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
{ {
if (small_const_nbits(nbits)) if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
...@@ -431,6 +444,8 @@ static __always_inline void bitmap_set(unsigned long *map, unsigned int start, ...@@ -431,6 +444,8 @@ static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
{ {
if (__builtin_constant_p(nbits) && nbits == 1) if (__builtin_constant_p(nbits) && nbits == 1)
__set_bit(start, map); __set_bit(start, map);
else if (small_const_nbits(start + nbits))
*map |= GENMASK(start + nbits - 1, start);
else if (__builtin_constant_p(start & BITMAP_MEM_MASK) && else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) && IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
__builtin_constant_p(nbits & BITMAP_MEM_MASK) && __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
...@@ -445,6 +460,8 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start, ...@@ -445,6 +460,8 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
{ {
if (__builtin_constant_p(nbits) && nbits == 1) if (__builtin_constant_p(nbits) && nbits == 1)
__clear_bit(start, map); __clear_bit(start, map);
else if (small_const_nbits(start + nbits))
*map &= ~GENMASK(start + nbits - 1, start);
else if (__builtin_constant_p(start & BITMAP_MEM_MASK) && else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) && IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
__builtin_constant_p(nbits & BITMAP_MEM_MASK) && __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
......
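A minimal sketch of the new small_const_nbits() branches in bitmap_set()/bitmap_clear() above (values chosen only for illustration): on a single-word map with constant @start and @nbits, each call reduces to one constant-mask operation:

	unsigned long map = 0;

	bitmap_set(&map, 5, 2);		/* map |= GENMASK(6, 5)  -> 0x60 */
	bitmap_clear(&map, 5, 1);	/* map &= ~GENMASK(5, 5) -> 0x40 */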
...@@ -26,12 +26,62 @@ extern unsigned int __sw_hweight16(unsigned int w); ...@@ -26,12 +26,62 @@ extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w); extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w); extern unsigned long __sw_hweight64(__u64 w);
/*
* Defined here because those may be needed by architecture-specific static
* inlines.
*/
#include <asm-generic/bitops/generic-non-atomic.h>
/*
* Many architecture-specific non-atomic bitops contain inline asm code and due
* to that the compiler can't optimize them to compile-time expressions or
 * constants. In contrast, generic_*() helpers are defined in pure C and
 * compilers optimize them just fine.
* Therefore, to make `unsigned long foo = 0; __set_bit(BAR, &foo)` effectively
* equal to `unsigned long foo = BIT(BAR)`, pick the generic C alternative when
* the arguments can be resolved at compile time. That expression itself is a
* constant and doesn't bring any functional changes to the rest of cases.
* The casts to `uintptr_t` are needed to mitigate `-Waddress` warnings when
* passing a bitmap from .bss or .data (-> `!!addr` is always true).
*/
#define bitop(op, nr, addr) \
((__builtin_constant_p(nr) && \
__builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) && \
(uintptr_t)(addr) != (uintptr_t)NULL && \
__builtin_constant_p(*(const unsigned long *)(addr))) ? \
const##op(nr, addr) : op(nr, addr))
#define __set_bit(nr, addr) bitop(___set_bit, nr, addr)
#define __clear_bit(nr, addr) bitop(___clear_bit, nr, addr)
#define __change_bit(nr, addr) bitop(___change_bit, nr, addr)
#define __test_and_set_bit(nr, addr) bitop(___test_and_set_bit, nr, addr)
#define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr)
#define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr)
#define test_bit(nr, addr) bitop(_test_bit, nr, addr)
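To make the dispatch above concrete, here is a minimal sketch in the spirit of the test_bitmap_const_eval() selftest added later in this series (assuming a compiler that keeps tracking @foo as a compile-time constant at -O2/-Os):

	unsigned long foo = 0;
	int res;

	__set_bit(2, &foo);		/* nr and *addr are known, so const___set_bit() is picked */
	res = test_bit(2, &foo);	/* likewise resolves via const_test_bit()                 */
	BUILD_BUG_ON(!__builtin_constant_p(res));	/* res folds to 1 at build time */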
/* /*
* Include this here because some architectures need generic_ffs/fls in * Include this here because some architectures need generic_ffs/fls in
* scope * scope
*/ */
#include <asm/bitops.h> #include <asm/bitops.h>
/* Check that the bitops prototypes are sane */
#define __check_bitop_pr(name) \
static_assert(__same_type(arch_##name, generic_##name) && \
__same_type(const_##name, generic_##name) && \
__same_type(_##name, generic_##name))
__check_bitop_pr(__set_bit);
__check_bitop_pr(__clear_bit);
__check_bitop_pr(__change_bit);
__check_bitop_pr(__test_and_set_bit);
__check_bitop_pr(__test_and_clear_bit);
__check_bitop_pr(__test_and_change_bit);
__check_bitop_pr(test_bit);
#undef __check_bitop_pr
static inline int get_bitmask_order(unsigned int count) static inline int get_bitmask_order(unsigned int count)
{ {
int order; int order;
......
...@@ -12,6 +12,8 @@ ...@@ -12,6 +12,8 @@
#include <linux/bitmap.h> #include <linux/bitmap.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#include <linux/bug.h> #include <linux/bug.h>
#include <linux/gfp_types.h>
#include <linux/numa.h>
/* Don't assign or return these: may not be this big! */ /* Don't assign or return these: may not be this big! */
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
...@@ -162,7 +164,21 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp) ...@@ -162,7 +164,21 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp)
return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits); return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits);
} }
unsigned int __pure cpumask_next(int n, const struct cpumask *srcp); /**
* cpumask_next - get the next cpu in a cpumask
* @n: the cpu prior to the place to search (ie. return will be > @n)
* @srcp: the cpumask pointer
*
* Returns >= nr_cpu_ids if no further cpus set.
*/
static inline
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
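A short usage sketch of the now-inlined cpumask_next(); this is essentially what for_each_cpu() expands to:

	unsigned int cpu;

	for (cpu = cpumask_next(-1, cpu_online_mask);
	     cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, cpu_online_mask))
		pr_info("online CPU %u\n", cpu);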
/** /**
* cpumask_next_zero - get the next unset cpu in a cpumask * cpumask_next_zero - get the next unset cpu in a cpumask
...@@ -179,9 +195,6 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) ...@@ -179,9 +195,6 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
} }
int __pure cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
int __pure cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
#if NR_CPUS == 1 #if NR_CPUS == 1
/* Uniprocessor: there is only one valid CPU */ /* Uniprocessor: there is only one valid CPU */
static inline unsigned int cpumask_local_spread(unsigned int i, int node) static inline unsigned int cpumask_local_spread(unsigned int i, int node)
...@@ -200,11 +213,30 @@ static inline int cpumask_any_distribute(const struct cpumask *srcp) ...@@ -200,11 +213,30 @@ static inline int cpumask_any_distribute(const struct cpumask *srcp)
} }
#else #else
unsigned int cpumask_local_spread(unsigned int i, int node); unsigned int cpumask_local_spread(unsigned int i, int node);
int cpumask_any_and_distribute(const struct cpumask *src1p, unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
const struct cpumask *src2p); const struct cpumask *src2p);
int cpumask_any_distribute(const struct cpumask *srcp); unsigned int cpumask_any_distribute(const struct cpumask *srcp);
#endif /* NR_CPUS */ #endif /* NR_CPUS */
/**
* cpumask_next_and - get the next cpu in *src1p & *src2p
* @n: the cpu prior to the place to search (ie. return will be > @n)
* @src1p: the first cpumask pointer
* @src2p: the second cpumask pointer
*
* Returns >= nr_cpu_ids if no further cpus set in both.
*/
static inline
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
const struct cpumask *src2p)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
nr_cpumask_bits, n + 1);
}
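Usage sketch for cpumask_next_and() (the @allowed mask is an assumed example): find the first CPU present in both masks, with nr_cpu_ids signalling that none exists:

	unsigned int cpu = cpumask_next_and(-1, cpu_online_mask, allowed);

	if (cpu >= nr_cpu_ids)
		return -ENODEV;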
/** /**
* for_each_cpu - iterate over every cpu in a mask * for_each_cpu - iterate over every cpu in a mask
* @cpu: the (optionally unsigned) integer iterator * @cpu: the (optionally unsigned) integer iterator
...@@ -229,7 +261,7 @@ int cpumask_any_distribute(const struct cpumask *srcp); ...@@ -229,7 +261,7 @@ int cpumask_any_distribute(const struct cpumask *srcp);
(cpu) = cpumask_next_zero((cpu), (mask)), \ (cpu) = cpumask_next_zero((cpu), (mask)), \
(cpu) < nr_cpu_ids;) (cpu) < nr_cpu_ids;)
int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap); unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
/** /**
* for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
...@@ -265,6 +297,26 @@ int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool ...@@ -265,6 +297,26 @@ int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
(cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \ (cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \
(cpu) < nr_cpu_ids;) (cpu) < nr_cpu_ids;)
/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one. * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
* @mask: the cpumask to search
* @cpu: the cpu to ignore.
*
* Often used to find any cpu but smp_processor_id() in a mask.
* Returns >= nr_cpu_ids if no cpus set.
*/
static inline
unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
unsigned int i;
cpumask_check(cpu);
for_each_cpu(i, mask)
if (i != cpu)
break;
return i;
}
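Typical use of cpumask_any_but(), as the comment suggests (collect_stats() is a made-up callback, for illustration only):

	unsigned int target = cpumask_any_but(cpu_online_mask, smp_processor_id());

	if (target < nr_cpu_ids)
		smp_call_function_single(target, collect_stats, NULL, 1);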
#define CPU_BITS_NONE \ #define CPU_BITS_NONE \
{ \ { \
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
...@@ -311,9 +363,9 @@ static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp) ...@@ -311,9 +363,9 @@ static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
* @cpu: cpu number (< nr_cpu_ids) * @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer * @cpumask: the cpumask pointer
* *
* Returns 1 if @cpu is set in @cpumask, else returns 0 * Returns true if @cpu is set in @cpumask, else returns false
*/ */
static __always_inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask) static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{ {
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
} }
...@@ -323,11 +375,11 @@ static __always_inline int cpumask_test_cpu(int cpu, const struct cpumask *cpuma ...@@ -323,11 +375,11 @@ static __always_inline int cpumask_test_cpu(int cpu, const struct cpumask *cpuma
* @cpu: cpu number (< nr_cpu_ids) * @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer * @cpumask: the cpumask pointer
* *
* Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0 * Returns true if @cpu is set in old bitmap of @cpumask, else returns false
* *
* test_and_set_bit wrapper for cpumasks. * test_and_set_bit wrapper for cpumasks.
*/ */
static __always_inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{ {
return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask)); return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
} }
...@@ -337,11 +389,11 @@ static __always_inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpu ...@@ -337,11 +389,11 @@ static __always_inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpu
* @cpu: cpu number (< nr_cpu_ids) * @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer * @cpumask: the cpumask pointer
* *
* Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0 * Returns true if @cpu is set in old bitmap of @cpumask, else returns false
* *
* test_and_clear_bit wrapper for cpumasks. * test_and_clear_bit wrapper for cpumasks.
*/ */
static __always_inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{ {
return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask)); return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
} }
...@@ -370,9 +422,9 @@ static inline void cpumask_clear(struct cpumask *dstp) ...@@ -370,9 +422,9 @@ static inline void cpumask_clear(struct cpumask *dstp)
* @src1p: the first input * @src1p: the first input
* @src2p: the second input * @src2p: the second input
* *
* If *@dstp is empty, returns 0, else returns 1 * If *@dstp is empty, returns false, else returns true
*/ */
static inline int cpumask_and(struct cpumask *dstp, static inline bool cpumask_and(struct cpumask *dstp,
const struct cpumask *src1p, const struct cpumask *src1p,
const struct cpumask *src2p) const struct cpumask *src2p)
{ {
...@@ -413,9 +465,9 @@ static inline void cpumask_xor(struct cpumask *dstp, ...@@ -413,9 +465,9 @@ static inline void cpumask_xor(struct cpumask *dstp,
* @src1p: the first input * @src1p: the first input
* @src2p: the second input * @src2p: the second input
* *
* If *@dstp is empty, returns 0, else returns 1 * If *@dstp is empty, returns false, else returns true
*/ */
static inline int cpumask_andnot(struct cpumask *dstp, static inline bool cpumask_andnot(struct cpumask *dstp,
const struct cpumask *src1p, const struct cpumask *src1p,
const struct cpumask *src2p) const struct cpumask *src2p)
{ {
...@@ -478,9 +530,9 @@ static inline bool cpumask_intersects(const struct cpumask *src1p, ...@@ -478,9 +530,9 @@ static inline bool cpumask_intersects(const struct cpumask *src1p,
* @src1p: the first input * @src1p: the first input
* @src2p: the second input * @src2p: the second input
* *
* Returns 1 if *@src1p is a subset of *@src2p, else returns 0 * Returns true if *@src1p is a subset of *@src2p, else returns false
*/ */
static inline int cpumask_subset(const struct cpumask *src1p, static inline bool cpumask_subset(const struct cpumask *src1p,
const struct cpumask *src2p) const struct cpumask *src2p)
{ {
return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p), return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
...@@ -682,9 +734,35 @@ typedef struct cpumask *cpumask_var_t; ...@@ -682,9 +734,35 @@ typedef struct cpumask *cpumask_var_t;
#define __cpumask_var_read_mostly __read_mostly #define __cpumask_var_read_mostly __read_mostly
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); static inline
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
/**
* alloc_cpumask_var - allocate a struct cpumask
* @mask: pointer to cpumask_var_t where the cpumask is returned
* @flags: GFP_ flags
*
* Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
* a nop returning a constant 1 (in <linux/cpumask.h>).
*
* See alloc_cpumask_var_node.
*/
static inline
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
static inline
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
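The usual allocate/use/free pattern for these helpers, sketched for reference:

	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, cpu_online_mask);
	/* ... operate on mask ... */
	free_cpumask_var(mask);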
void alloc_bootmem_cpumask_var(cpumask_var_t *mask); void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask); void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask); void free_bootmem_cpumask_var(cpumask_var_t mask);
......
...@@ -94,6 +94,7 @@ ...@@ -94,6 +94,7 @@
#include <linux/bitmap.h> #include <linux/bitmap.h>
#include <linux/minmax.h> #include <linux/minmax.h>
#include <linux/numa.h> #include <linux/numa.h>
#include <linux/random.h>
typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t; typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
extern nodemask_t _unused_nodemask_arg_; extern nodemask_t _unused_nodemask_arg_;
...@@ -276,7 +277,14 @@ static inline unsigned int __next_node(int n, const nodemask_t *srcp) ...@@ -276,7 +277,14 @@ static inline unsigned int __next_node(int n, const nodemask_t *srcp)
* the first node in src if needed. Returns MAX_NUMNODES if src is empty. * the first node in src if needed. Returns MAX_NUMNODES if src is empty.
*/ */
#define next_node_in(n, src) __next_node_in((n), &(src)) #define next_node_in(n, src) __next_node_in((n), &(src))
unsigned int __next_node_in(int node, const nodemask_t *srcp); static inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
{
unsigned int ret = __next_node(node, srcp);
if (ret == MAX_NUMNODES)
ret = __first_node(srcp);
return ret;
}
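A sketch of the round-robin pattern __next_node_in() serves (the static @last_node counter and the allocation call are illustrative only):

	static int last_node = NUMA_NO_NODE;
	struct page *page;

	last_node = next_node_in(last_node, node_online_map);
	page = alloc_pages_node(last_node, GFP_KERNEL, 0);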
static inline void init_nodemask_of_node(nodemask_t *mask, int node) static inline void init_nodemask_of_node(nodemask_t *mask, int node)
{ {
...@@ -493,14 +501,20 @@ static inline int num_node_state(enum node_states state) ...@@ -493,14 +501,20 @@ static inline int num_node_state(enum node_states state)
#endif #endif
static inline int node_random(const nodemask_t *maskp)
{
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1) #if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
extern int node_random(const nodemask_t *maskp); int w, bit = NUMA_NO_NODE;
w = nodes_weight(*maskp);
if (w)
bit = bitmap_ord_to_pos(maskp->bits,
get_random_int() % w, MAX_NUMNODES);
return bit;
#else #else
static inline int node_random(const nodemask_t *mask)
{
return 0; return 0;
}
#endif #endif
}
#define node_online_map node_states[N_ONLINE] #define node_online_map node_states[N_ONLINE]
#define node_possible_map node_states[N_POSSIBLE] #define node_possible_map node_states[N_POSSIBLE]
......
...@@ -33,7 +33,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ ...@@ -33,7 +33,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
flex_proportions.o ratelimit.o show_mem.o \ flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o seq_buf.o siphash.o dec_and_lock.o \ earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o \ nmi_backtrace.o win_minmax.o memcat_p.o \
buildid.o cpumask.o buildid.o cpumask.o
lib-$(CONFIG_PRINTK) += dump_stack.o lib-$(CONFIG_PRINTK) += dump_stack.o
......
...@@ -237,7 +237,7 @@ void bitmap_cut(unsigned long *dst, const unsigned long *src, ...@@ -237,7 +237,7 @@ void bitmap_cut(unsigned long *dst, const unsigned long *src,
} }
EXPORT_SYMBOL(bitmap_cut); EXPORT_SYMBOL(bitmap_cut);
int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits) const unsigned long *bitmap2, unsigned int bits)
{ {
unsigned int k; unsigned int k;
...@@ -275,7 +275,7 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, ...@@ -275,7 +275,7 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
} }
EXPORT_SYMBOL(__bitmap_xor); EXPORT_SYMBOL(__bitmap_xor);
int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits) const unsigned long *bitmap2, unsigned int bits)
{ {
unsigned int k; unsigned int k;
...@@ -333,10 +333,9 @@ bool __bitmap_subset(const unsigned long *bitmap1, ...@@ -333,10 +333,9 @@ bool __bitmap_subset(const unsigned long *bitmap1,
} }
EXPORT_SYMBOL(__bitmap_subset); EXPORT_SYMBOL(__bitmap_subset);
int __bitmap_weight(const unsigned long *bitmap, unsigned int bits) unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
{ {
unsigned int k, lim = bits/BITS_PER_LONG; unsigned int k, lim = bits/BITS_PER_LONG, w = 0;
int w = 0;
for (k = 0; k < lim; k++) for (k = 0; k < lim; k++)
w += hweight_long(bitmap[k]); w += hweight_long(bitmap[k]);
...@@ -1564,7 +1563,7 @@ void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits) ...@@ -1564,7 +1563,7 @@ void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits)
/* Clear tail bits in the last element of array beyond nbits. */ /* Clear tail bits in the last element of array beyond nbits. */
if (nbits % 64) if (nbits % 64)
buf[-1] &= GENMASK_ULL(nbits % 64, 0); buf[-1] &= GENMASK_ULL((nbits - 1) % 64, 0);
} }
EXPORT_SYMBOL(bitmap_to_arr64); EXPORT_SYMBOL(bitmap_to_arr64);
#endif #endif
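A worked case of the off-by-one fixed above, for nbits == 65 (one valid bit in the last u64 element):

	/* old: buf[-1] &= GENMASK_ULL(65 % 64, 0);        i.e. &= 0x3, bit 1 leaks through */
	/* new: buf[-1] &= GENMASK_ULL((65 - 1) % 64, 0);  i.e. &= 0x1, only bit 0 survives */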
...@@ -7,61 +7,6 @@ ...@@ -7,61 +7,6 @@
#include <linux/memblock.h> #include <linux/memblock.h>
#include <linux/numa.h> #include <linux/numa.h>
/**
* cpumask_next - get the next cpu in a cpumask
* @n: the cpu prior to the place to search (ie. return will be > @n)
* @srcp: the cpumask pointer
*
* Returns >= nr_cpu_ids if no further cpus set.
*/
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next);
/**
* cpumask_next_and - get the next cpu in *src1p & *src2p
* @n: the cpu prior to the place to search (ie. return will be > @n)
* @src1p: the first cpumask pointer
* @src2p: the second cpumask pointer
*
* Returns >= nr_cpu_ids if no further cpus set in both.
*/
int cpumask_next_and(int n, const struct cpumask *src1p,
const struct cpumask *src2p)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next_and);
/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
* @mask: the cpumask to search
* @cpu: the cpu to ignore.
*
* Often used to find any cpu but smp_processor_id() in a mask.
* Returns >= nr_cpu_ids if no cpus set.
*/
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
unsigned int i;
cpumask_check(cpu);
for_each_cpu(i, mask)
if (i != cpu)
break;
return i;
}
EXPORT_SYMBOL(cpumask_any_but);
/** /**
* cpumask_next_wrap - helper to implement for_each_cpu_wrap * cpumask_next_wrap - helper to implement for_each_cpu_wrap
* @n: the cpu prior to the place to search * @n: the cpu prior to the place to search
...@@ -74,9 +19,9 @@ EXPORT_SYMBOL(cpumask_any_but); ...@@ -74,9 +19,9 @@ EXPORT_SYMBOL(cpumask_any_but);
* Note: the @wrap argument is required for the start condition when * Note: the @wrap argument is required for the start condition when
* we cannot assume @start is set in @mask. * we cannot assume @start is set in @mask.
*/ */
int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap) unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{ {
int next; unsigned int next;
again: again:
next = cpumask_next(n, mask); next = cpumask_next(n, mask);
...@@ -125,34 +70,6 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) ...@@ -125,34 +70,6 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
} }
EXPORT_SYMBOL(alloc_cpumask_var_node); EXPORT_SYMBOL(alloc_cpumask_var_node);
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);
/**
* alloc_cpumask_var - allocate a struct cpumask
* @mask: pointer to cpumask_var_t where the cpumask is returned
* @flags: GFP_ flags
*
* Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
* a nop returning a constant 1 (in <linux/cpumask.h>).
*
* See alloc_cpumask_var_node.
*/
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);
/** /**
* alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena. * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
* @mask: pointer to cpumask_var_t where the cpumask is returned * @mask: pointer to cpumask_var_t where the cpumask is returned
...@@ -206,7 +123,7 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask) ...@@ -206,7 +123,7 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
*/ */
unsigned int cpumask_local_spread(unsigned int i, int node) unsigned int cpumask_local_spread(unsigned int i, int node)
{ {
int cpu; unsigned int cpu;
/* Wrap: we always want a cpu. */ /* Wrap: we always want a cpu. */
i %= num_online_cpus(); i %= num_online_cpus();
...@@ -244,10 +161,10 @@ static DEFINE_PER_CPU(int, distribute_cpu_mask_prev); ...@@ -244,10 +161,10 @@ static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
* *
* Returns >= nr_cpu_ids if the intersection is empty. * Returns >= nr_cpu_ids if the intersection is empty.
*/ */
int cpumask_any_and_distribute(const struct cpumask *src1p, unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
const struct cpumask *src2p) const struct cpumask *src2p)
{ {
int next, prev; unsigned int next, prev;
/* NOTE: our first selection will skip 0. */ /* NOTE: our first selection will skip 0. */
prev = __this_cpu_read(distribute_cpu_mask_prev); prev = __this_cpu_read(distribute_cpu_mask_prev);
...@@ -263,9 +180,9 @@ int cpumask_any_and_distribute(const struct cpumask *src1p, ...@@ -263,9 +180,9 @@ int cpumask_any_and_distribute(const struct cpumask *src1p,
} }
EXPORT_SYMBOL(cpumask_any_and_distribute); EXPORT_SYMBOL(cpumask_any_and_distribute);
int cpumask_any_distribute(const struct cpumask *srcp) unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{ {
int next, prev; unsigned int next, prev;
/* NOTE: our first selection will skip 0. */ /* NOTE: our first selection will skip 0. */
prev = __this_cpu_read(distribute_cpu_mask_prev); prev = __this_cpu_read(distribute_cpu_mask_prev);
......
...@@ -3,14 +3,6 @@ ...@@ -3,14 +3,6 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/random.h> #include <linux/random.h>
unsigned int __next_node_in(int node, const nodemask_t *srcp)
{
unsigned int ret = __next_node(node, srcp);
if (ret == MAX_NUMNODES)
ret = __first_node(srcp);
return ret;
}
EXPORT_SYMBOL(__next_node_in); EXPORT_SYMBOL(__next_node_in);
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
......
...@@ -604,6 +604,12 @@ static void __init test_bitmap_arr64(void) ...@@ -604,6 +604,12 @@ static void __init test_bitmap_arr64(void)
pr_err("bitmap_copy_arr64(nbits == %d:" pr_err("bitmap_copy_arr64(nbits == %d:"
" tail is not safely cleared: %d\n", nbits, next_bit); " tail is not safely cleared: %d\n", nbits, next_bit);
if ((nbits % 64) &&
(arr[(nbits - 1) / 64] & ~GENMASK_ULL((nbits - 1) % 64, 0)))
pr_err("bitmap_to_arr64(nbits == %d): tail is not safely cleared: 0x%016llx (must be 0x%016llx)\n",
nbits, arr[(nbits - 1) / 64],
GENMASK_ULL((nbits - 1) % 64, 0));
if (nbits < EXP1_IN_BITS - 64) if (nbits < EXP1_IN_BITS - 64)
expect_eq_uint(arr[DIV_ROUND_UP(nbits, 64)], 0xa5a5a5a5); expect_eq_uint(arr[DIV_ROUND_UP(nbits, 64)], 0xa5a5a5a5);
} }
...@@ -869,6 +875,67 @@ static void __init test_bitmap_print_buf(void) ...@@ -869,6 +875,67 @@ static void __init test_bitmap_print_buf(void)
} }
} }
static void __init test_bitmap_const_eval(void)
{
DECLARE_BITMAP(bitmap, BITS_PER_LONG);
unsigned long initvar = BIT(2);
unsigned long bitopvar = 0;
unsigned long var = 0;
int res;
/*
* Compilers must be able to optimize all of those to compile-time
* constants on any supported optimization level (-O2, -Os) and any
* architecture. Otherwise, trigger a build bug.
* The whole function gets optimized out then, there's nothing to do
* in runtime.
*/
/*
 * Equivalent to `unsigned long bitmap[1] = { GENMASK(6, 5), }`.
* Clang on s390 optimizes bitops at compile-time as intended, but at
* the same time stops treating @bitmap and @bitopvar as compile-time
* constants after regular test_bit() is executed, thus triggering the
* build bugs below. So, call const_test_bit() there directly until
* the compiler is fixed.
*/
bitmap_clear(bitmap, 0, BITS_PER_LONG);
#if defined(__s390__) && defined(__clang__)
if (!const_test_bit(7, bitmap))
#else
if (!test_bit(7, bitmap))
#endif
bitmap_set(bitmap, 5, 2);
 /* Equivalent to `unsigned long bitopvar = BIT(20)` */
__change_bit(31, &bitopvar);
bitmap_shift_right(&bitopvar, &bitopvar, 11, BITS_PER_LONG);
 /* Equivalent to `unsigned long var = BIT(25)` */
var |= BIT(25);
if (var & BIT(0))
var ^= GENMASK(9, 6);
/* __const_hweight<32|64>(GENMASK(6, 5)) == 2 */
res = bitmap_weight(bitmap, 20);
BUILD_BUG_ON(!__builtin_constant_p(res));
BUILD_BUG_ON(res != 2);
/* !(BIT(31) & BIT(18)) == 1 */
res = !test_bit(18, &bitopvar);
BUILD_BUG_ON(!__builtin_constant_p(res));
BUILD_BUG_ON(!res);
/* BIT(2) & GENMASK(14, 8) == 0 */
res = initvar & GENMASK(14, 8);
BUILD_BUG_ON(!__builtin_constant_p(res));
BUILD_BUG_ON(res);
/* ~BIT(25) */
BUILD_BUG_ON(!__builtin_constant_p(~var));
BUILD_BUG_ON(~var != ~BIT(25));
}
static void __init selftest(void) static void __init selftest(void)
{ {
test_zero_clear(); test_zero_clear();
...@@ -884,6 +951,7 @@ static void __init selftest(void) ...@@ -884,6 +951,7 @@ static void __init selftest(void)
test_for_each_set_clump8(); test_for_each_set_clump8();
test_bitmap_cut(); test_bitmap_cut();
test_bitmap_print_buf(); test_bitmap_print_buf();
test_bitmap_const_eval();
} }
KSTM_MODULE_LOADERS(test_bitmap); KSTM_MODULE_LOADERS(test_bitmap);
......
...@@ -2,10 +2,10 @@ ...@@ -2,10 +2,10 @@
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ #ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#include <asm/types.h> #include <linux/bits.h>
/** /**
* __set_bit - Set a bit in memory * ___set_bit - Set a bit in memory
* @nr: the bit to set * @nr: the bit to set
* @addr: the address to start counting from * @addr: the address to start counting from
* *
...@@ -13,7 +13,8 @@ ...@@ -13,7 +13,8 @@
* If it's called on the same region of memory simultaneously, the effect * If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds. * may be that only one operation succeeds.
*/ */
static inline void __set_bit(int nr, volatile unsigned long *addr) static __always_inline void
___set_bit(unsigned long nr, volatile unsigned long *addr)
{ {
unsigned long mask = BIT_MASK(nr); unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
...@@ -21,7 +22,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr) ...@@ -21,7 +22,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
*p |= mask; *p |= mask;
} }
static inline void __clear_bit(int nr, volatile unsigned long *addr) static __always_inline void
___clear_bit(unsigned long nr, volatile unsigned long *addr)
{ {
unsigned long mask = BIT_MASK(nr); unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
...@@ -30,7 +32,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr) ...@@ -30,7 +32,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
} }
/** /**
* __change_bit - Toggle a bit in memory * ___change_bit - Toggle a bit in memory
* @nr: the bit to change * @nr: the bit to change
* @addr: the address to start counting from * @addr: the address to start counting from
* *
...@@ -38,7 +40,8 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr) ...@@ -38,7 +40,8 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
* If it's called on the same region of memory simultaneously, the effect * If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds. * may be that only one operation succeeds.
*/ */
static inline void __change_bit(int nr, volatile unsigned long *addr) static __always_inline void
___change_bit(unsigned long nr, volatile unsigned long *addr)
{ {
unsigned long mask = BIT_MASK(nr); unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
...@@ -47,7 +50,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr) ...@@ -47,7 +50,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
} }
/** /**
* __test_and_set_bit - Set a bit and return its old value * ___test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set * @nr: Bit to set
* @addr: Address to count from * @addr: Address to count from
* *
...@@ -55,7 +58,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr) ...@@ -55,7 +58,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
* If two examples of this operation race, one can appear to succeed * If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock. * but actually fail. You must protect multiple accesses with a lock.
*/ */
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) static __always_inline bool
___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{ {
unsigned long mask = BIT_MASK(nr); unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
...@@ -66,7 +70,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) ...@@ -66,7 +70,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
} }
/** /**
* __test_and_clear_bit - Clear a bit and return its old value * ___test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear * @nr: Bit to clear
* @addr: Address to count from * @addr: Address to count from
* *
...@@ -74,7 +78,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) ...@@ -74,7 +78,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
* If two examples of this operation race, one can appear to succeed * If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock. * but actually fail. You must protect multiple accesses with a lock.
*/ */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) static __always_inline bool
___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{ {
unsigned long mask = BIT_MASK(nr); unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
...@@ -85,8 +90,8 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) ...@@ -85,8 +90,8 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
} }
/* WARNING: non atomic and it can be reordered! */ /* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, static __always_inline bool
volatile unsigned long *addr) ___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{ {
unsigned long mask = BIT_MASK(nr); unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
...@@ -97,11 +102,12 @@ static inline int __test_and_change_bit(int nr, ...@@ -97,11 +102,12 @@ static inline int __test_and_change_bit(int nr,
} }
/** /**
* test_bit - Determine whether a bit is set * _test_bit - Determine whether a bit is set
* @nr: bit number to test * @nr: bit number to test
* @addr: Address to start counting from * @addr: Address to start counting from
*/ */
static inline int test_bit(int nr, const volatile unsigned long *addr) static __always_inline bool
_test_bit(unsigned long nr, const volatile unsigned long *addr)
{ {
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
} }
......
...@@ -11,10 +11,10 @@ ...@@ -11,10 +11,10 @@
#define DECLARE_BITMAP(name,bits) \ #define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)] unsigned long name[BITS_TO_LONGS(bits)]
int __bitmap_weight(const unsigned long *bitmap, int bits); unsigned int __bitmap_weight(const unsigned long *bitmap, int bits);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits); const unsigned long *bitmap2, int bits);
int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits); const unsigned long *bitmap2, unsigned int bits);
bool __bitmap_equal(const unsigned long *bitmap1, bool __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits); const unsigned long *bitmap2, unsigned int bits);
...@@ -45,7 +45,7 @@ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) ...@@ -45,7 +45,7 @@ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits); dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
} }
static inline int bitmap_empty(const unsigned long *src, unsigned nbits) static inline bool bitmap_empty(const unsigned long *src, unsigned int nbits)
{ {
if (small_const_nbits(nbits)) if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
...@@ -53,7 +53,7 @@ static inline int bitmap_empty(const unsigned long *src, unsigned nbits) ...@@ -53,7 +53,7 @@ static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
return find_first_bit(src, nbits) == nbits; return find_first_bit(src, nbits) == nbits;
} }
static inline int bitmap_full(const unsigned long *src, unsigned int nbits) static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
{ {
if (small_const_nbits(nbits)) if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits)); return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
...@@ -61,7 +61,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits) ...@@ -61,7 +61,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
return find_first_zero_bit(src, nbits) == nbits; return find_first_zero_bit(src, nbits) == nbits;
} }
static inline int bitmap_weight(const unsigned long *src, unsigned int nbits) static inline unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
{ {
if (small_const_nbits(nbits)) if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
...@@ -146,7 +146,7 @@ size_t bitmap_scnprintf(unsigned long *bitmap, unsigned int nbits, ...@@ -146,7 +146,7 @@ size_t bitmap_scnprintf(unsigned long *bitmap, unsigned int nbits,
* @src2: operand 2 * @src2: operand 2
* @nbits: size of bitmap * @nbits: size of bitmap
*/ */
static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits) const unsigned long *src2, unsigned int nbits)
{ {
if (small_const_nbits(nbits)) if (small_const_nbits(nbits))
......
...@@ -25,6 +25,22 @@ extern unsigned int __sw_hweight16(unsigned int w); ...@@ -25,6 +25,22 @@ extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w); extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w); extern unsigned long __sw_hweight64(__u64 w);
/*
* Defined here because those may be needed by architecture-specific static
* inlines.
*/
#define bitop(op, nr, addr) \
op(nr, addr)
#define __set_bit(nr, addr) bitop(___set_bit, nr, addr)
#define __clear_bit(nr, addr) bitop(___clear_bit, nr, addr)
#define __change_bit(nr, addr) bitop(___change_bit, nr, addr)
#define __test_and_set_bit(nr, addr) bitop(___test_and_set_bit, nr, addr)
#define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr)
#define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr)
#define test_bit(nr, addr) bitop(_test_bit, nr, addr)
/* /*
* Include this here because some architectures need generic_ffs/fls in * Include this here because some architectures need generic_ffs/fls in
* scope * scope
......
...@@ -5,9 +5,9 @@ ...@@ -5,9 +5,9 @@
*/ */
#include <linux/bitmap.h> #include <linux/bitmap.h>
int __bitmap_weight(const unsigned long *bitmap, int bits) unsigned int __bitmap_weight(const unsigned long *bitmap, int bits)
{ {
int k, w = 0, lim = bits/BITS_PER_LONG; unsigned int k, w = 0, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; k++) for (k = 0; k < lim; k++)
w += hweight_long(bitmap[k]); w += hweight_long(bitmap[k]);
...@@ -57,7 +57,7 @@ size_t bitmap_scnprintf(unsigned long *bitmap, unsigned int nbits, ...@@ -57,7 +57,7 @@ size_t bitmap_scnprintf(unsigned long *bitmap, unsigned int nbits,
return ret; return ret;
} }
int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits) const unsigned long *bitmap2, unsigned int bits)
{ {
unsigned int k; unsigned int k;
......