Commit 208151bf authored by Helge Deller

parisc: Convert to BIT_MASK() and BIT_WORD()

Drop the arch's own open-coded bit-manipulation helpers and use the
kernel-provided BIT_MASK() and BIT_WORD() macros instead.
Signed-off-by: Helge Deller <deller@gmx.de>
parent ba47d845
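
For reference, the generic helpers this patch switches to come from include/linux/bits.h. A minimal sketch of their expansion (the exact spelling varies slightly across kernel versions, e.g. UL() wrappers):

#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))	/* bit within its word */
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)		/* index of the containing word */

Since BITS_PER_LONG is a power of two, the modulo and division compile down to the same AND and shift that the old CHOP_SHIFTCOUNT()/SHIFT_PER_LONG open-coding produced, so the conversion is behavior-neutral.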
@@ -12,21 +12,6 @@
 #include <asm/barrier.h>
 #include <linux/atomic.h>
 
-/*
- * HP-PARISC specific bit operations
- * for a detailed description of the functions please refer
- * to include/asm-i386/bitops.h or kerneldoc
- */
-
-#if __BITS_PER_LONG == 64
-#define SHIFT_PER_LONG 6
-#else
-#define SHIFT_PER_LONG 5
-#endif
-
-#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
-
-
 /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
  * on use of volatile and __*_bit() (set/clear/change):
  * *_bit() want use of volatile.
@@ -35,10 +20,10 @@
 static __inline__ void set_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long mask = BIT_MASK(nr);
 	unsigned long flags;
 
-	addr += (nr >> SHIFT_PER_LONG);
+	addr += BIT_WORD(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
 	*addr |= mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
@@ -46,21 +31,21 @@ static __inline__ void set_bit(int nr, volatile unsigned long * addr)
 static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
+	unsigned long mask = BIT_MASK(nr);
 	unsigned long flags;
 
-	addr += (nr >> SHIFT_PER_LONG);
+	addr += BIT_WORD(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
-	*addr &= mask;
+	*addr &= ~mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 }
 
 static __inline__ void change_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long mask = BIT_MASK(nr);
 	unsigned long flags;
 
-	addr += (nr >> SHIFT_PER_LONG);
+	addr += BIT_WORD(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
 	*addr ^= mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
@@ -68,12 +53,12 @@ static __inline__ void change_bit(int nr, volatile unsigned long * addr)
 static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long mask = BIT_MASK(nr);
 	unsigned long old;
 	unsigned long flags;
 	int set;
 
-	addr += (nr >> SHIFT_PER_LONG);
+	addr += BIT_WORD(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
 	old = *addr;
 	set = (old & mask) ? 1 : 0;
@@ -86,12 +71,12 @@ static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
 static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long mask = BIT_MASK(nr);
 	unsigned long old;
 	unsigned long flags;
 	int set;
 
-	addr += (nr >> SHIFT_PER_LONG);
+	addr += BIT_WORD(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
 	old = *addr;
 	set = (old & mask) ? 1 : 0;
@@ -104,11 +89,11 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
 static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long mask = BIT_MASK(nr);
 	unsigned long oldbit;
 	unsigned long flags;
 
-	addr += (nr >> SHIFT_PER_LONG);
+	addr += BIT_WORD(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
 	oldbit = *addr;
 	*addr = oldbit ^ mask;
...
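
One subtlety in the clear_bit() hunk above: the old code pre-inverted the mask at initialization, while the new code keeps BIT_MASK(nr) positive and applies ~mask at the point of use. A minimal userspace sketch (the test value and local macro copies are illustrative, not kernel code) confirming the two forms produce the same mask:

#include <assert.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define CHOP_SHIFTCOUNT(x)	(((unsigned long) (x)) & (BITS_PER_LONG - 1))

int main(void)
{
	int nr = 37;	/* arbitrary bit number for illustration */
	unsigned long old_style = ~(1UL << CHOP_SHIFTCOUNT(nr));	/* pre-inverted */
	unsigned long new_style = ~BIT_MASK(nr);			/* inverted at use */

	assert(old_style == new_style);	/* both clear exactly the same bit */
	return 0;
}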
@@ -750,7 +750,7 @@ unsigned long alloc_sid(void)
 	free_space_ids--;
 
 	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
-	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
+	space_id[BIT_WORD(index)] |= BIT_MASK(index);
 	space_id_index = index;
 
 	spin_unlock(&sid_lock);
@@ -761,16 +761,16 @@ unsigned long alloc_sid(void)
 void free_sid(unsigned long spaceid)
 {
 	unsigned long index = spaceid >> SPACEID_SHIFT;
-	unsigned long *dirty_space_offset;
+	unsigned long *dirty_space_offset, mask;
 
-	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
-	index &= (BITS_PER_LONG - 1);
+	dirty_space_offset = &dirty_space_id[BIT_WORD(index)];
+	mask = BIT_MASK(index);
 
 	spin_lock(&sid_lock);
 
-	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */
+	BUG_ON(*dirty_space_offset & mask); /* attempt to free space id twice */
 
-	*dirty_space_offset |= (1L << index);
+	*dirty_space_offset |= mask;
 	dirty_space_ids++;
 
 	spin_unlock(&sid_lock);
...
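
The alloc_sid()/free_sid() hunks apply the same pair of macros to a multi-word bitmap: BIT_WORD() selects the array slot and BIT_MASK() the bit inside it. A self-contained userspace sketch of that indexing pattern (NR_IDS and the helper names are hypothetical, not from the patch):

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

#define NR_IDS 512	/* hypothetical bitmap size */
static unsigned long used_ids[NR_IDS / BITS_PER_LONG];

/* Set a bit, as alloc_sid() does on space_id[]. */
static void mark_used(unsigned long id)
{
	used_ids[BIT_WORD(id)] |= BIT_MASK(id);
}

/* Test-then-set, mirroring the BUG_ON() + set sequence in free_sid(). */
static int mark_once(unsigned long id)
{
	unsigned long *word = &used_ids[BIT_WORD(id)];
	unsigned long mask = BIT_MASK(id);

	if (*word & mask)
		return -1;	/* already set: free_sid() would BUG() here */
	*word |= mask;
	return 0;
}

int main(void)
{
	mark_used(100);	/* lands in word 1, bit 36 on a 64-bit machine */
	printf("first: %d, second: %d\n",
	       mark_once(200), mark_once(200));	/* prints 0, then -1 */
	return 0;
}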