Commit 95ffa676 authored by Linus Torvalds

Merge branch 'parisc-5.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc updates from Helge Deller:
 "The majority of the patches are reverts of previous commits regarding
  the parisc-specific low level spinlocking code and barrier handling,
  with which we tried to fix CPU stalls on our build servers. In the end
  John David Anglin found the culprit: We missed a define for
  atomic64_set_release(). This seems to have fixed our issues, so now
  it's good to remove the unnecessary code again.

  Other than that it's trivial stuff: Spelling fixes, constifications
  and such"

* 'parisc-5.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: make the log level string for register dumps const
  parisc: Do not use an ordered store in pa_tlb_lock()
  Revert "parisc: Revert "Release spinlocks using ordered store""
  Revert "parisc: Use ldcw instruction for SMP spinlock release barrier"
  Revert "parisc: Drop LDCW barrier in CAS code when running UP"
  Revert "parisc: Improve interrupt handling in arch_spin_lock_flags()"
  parisc: Replace HTTP links with HTTPS ones
  parisc: elf.h: delete a duplicated word
  parisc: Report bad pages as HardwareCorrupted
  parisc: Convert to BIT_MASK() and BIT_WORD()
parents 4da9f330 e2693ec1
@@ -285,7 +285,7 @@ config SMP
 	  On a uniprocessor machine, the kernel will run faster if you say N.
 	  See also <file:Documentation/admin-guide/lockup-watchdogs.rst> and the SMP-HOWTO
-	  available at <http://www.tldp.org/docs.html#howto>.
+	  available at <https://www.tldp.org/docs.html#howto>.
 	  If you don't know what to do here, say N.
...
@@ -12,21 +12,6 @@
 #include <asm/barrier.h>
 #include <linux/atomic.h>
 
-/*
- * HP-PARISC specific bit operations
- * for a detailed description of the functions please refer
- * to include/asm-i386/bitops.h or kerneldoc
- */
-#if __BITS_PER_LONG == 64
-#define SHIFT_PER_LONG	6
-#else
-#define SHIFT_PER_LONG	5
-#endif
-
-#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
-
 /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
  * on use of volatile and __*_bit() (set/clear/change):
  *	*_bit() want use of volatile.
@@ -35,10 +20,10 @@
 static __inline__ void set_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long mask = BIT_MASK(nr);
 	unsigned long flags;
 
-	addr += (nr >> SHIFT_PER_LONG);
+	addr += BIT_WORD(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
 	*addr |= mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
@@ -46,21 +31,21 @@ static __inline__ void set_bit(int nr, volatile unsigned long * addr)
 static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
+	unsigned long mask = BIT_MASK(nr);
 	unsigned long flags;
 
-	addr += (nr >> SHIFT_PER_LONG);
+	addr += BIT_WORD(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
-	*addr &= mask;
+	*addr &= ~mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 }
 
 static __inline__ void change_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long mask = BIT_MASK(nr);
 	unsigned long flags;
 
-	addr += (nr >> SHIFT_PER_LONG);
+	addr += BIT_WORD(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
 	*addr ^= mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
@@ -68,12 +53,12 @@ static __inline__ void change_bit(int nr, volatile unsigned long * addr)
 static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long mask = BIT_MASK(nr);
 	unsigned long old;
 	unsigned long flags;
 	int set;
 
-	addr += (nr >> SHIFT_PER_LONG);
+	addr += BIT_WORD(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
 	old = *addr;
 	set = (old & mask) ? 1 : 0;
@@ -86,12 +71,12 @@ static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
 static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long mask = BIT_MASK(nr);
 	unsigned long old;
 	unsigned long flags;
 	int set;
 
-	addr += (nr >> SHIFT_PER_LONG);
+	addr += BIT_WORD(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
 	old = *addr;
 	set = (old & mask) ? 1 : 0;
@@ -104,11 +89,11 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
 static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long mask = BIT_MASK(nr);
 	unsigned long oldbit;
 	unsigned long flags;
 
-	addr += (nr >> SHIFT_PER_LONG);
+	addr += BIT_WORD(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
 	oldbit = *addr;
 	*addr = oldbit ^ mask;
...
@@ -152,7 +152,7 @@
 /* The following are PA function descriptors
  *
  *	addr:	the absolute address of the function
- *	gp:	either the data pointer (r27) for non-PIC code or the
+ *	gp:	either the data pointer (r27) for non-PIC code or
  *		the PLT pointer (r19) for PIC code */
 
 /* Format for the Elf32 Function descriptor */
...
@@ -10,34 +10,25 @@
 static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
 	volatile unsigned int *a = __ldcw_align(x);
-	smp_mb();
 	return *a == 0;
 }
 
-static inline void arch_spin_lock(arch_spinlock_t *x)
-{
-	volatile unsigned int *a;
-
-	a = __ldcw_align(x);
-	while (__ldcw(a) == 0)
-		while (*a == 0)
-			cpu_relax();
-}
+#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
 
 static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 					 unsigned long flags)
 {
 	volatile unsigned int *a;
-	unsigned long flags_dis;
 
 	a = __ldcw_align(x);
-	while (__ldcw(a) == 0) {
-		local_save_flags(flags_dis);
-		local_irq_restore(flags);
-		while (*a == 0)
-			cpu_relax();
-		local_irq_restore(flags_dis);
-	}
+	while (__ldcw(a) == 0)
+		while (*a == 0)
+			if (flags & PSW_SM_I) {
+				local_irq_enable();
+				cpu_relax();
+				local_irq_disable();
+			} else
+				cpu_relax();
 }
 #define arch_spin_lock_flags arch_spin_lock_flags
@@ -46,12 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
 	volatile unsigned int *a;
 
 	a = __ldcw_align(x);
-#ifdef CONFIG_SMP
-	(void) __ldcw(a);
-#else
-	mb();
-#endif
-	*a = 1;
+	/* Release with ordered store. */
+	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
...
@@ -454,7 +454,6 @@
 	nop
 	LDREG		0(\ptp),\pte
 	bb,<,n		\pte,_PAGE_PRESENT_BIT,3f
-	LDCW		0(\tmp),\tmp1
 	b		\fault
 	stw		\spc,0(\tmp)
 99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
@@ -464,23 +463,26 @@
 3:
 	.endm
 
-	/* Release pa_tlb_lock lock without reloading lock address. */
-	.macro		tlb_unlock0	spc,tmp,tmp1
+	/* Release pa_tlb_lock lock without reloading lock address.
+	   Note that the values in the register spc are limited to
+	   NR_SPACE_IDS (262144).  Thus, the stw instruction always
+	   stores a nonzero value even when register spc is 64 bits.
+	   We use an ordered store to ensure all prior accesses are
+	   performed prior to releasing the lock. */
+	.macro		tlb_unlock0	spc,tmp
 #ifdef CONFIG_SMP
 98:	or,COND(=)	%r0,\spc,%r0
-	LDCW		0(\tmp),\tmp1
-	or,COND(=)	%r0,\spc,%r0
-	stw		\spc,0(\tmp)
+	stw,ma		\spc,0(\tmp)
 99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
 #endif
 	.endm
 
 	/* Release pa_tlb_lock lock. */
-	.macro		tlb_unlock1	spc,tmp,tmp1
+	.macro		tlb_unlock1	spc,tmp
 #ifdef CONFIG_SMP
 98:	load_pa_tlb_lock \tmp
 99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-	tlb_unlock0	\spc,\tmp,\tmp1
+	tlb_unlock0	\spc,\tmp
 #endif
 	.endm
@@ -1163,7 +1165,7 @@ dtlb_miss_20w:
 	idtlbt		pte,prot
-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
 	rfir
 	nop
@@ -1189,7 +1191,7 @@ nadtlb_miss_20w:
 	idtlbt		pte,prot
-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
 	rfir
 	nop
@@ -1223,7 +1225,7 @@ dtlb_miss_11:
 	mtsp		t1, %sr1	/* Restore sr1 */
-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
 	rfir
 	nop
@@ -1256,7 +1258,7 @@ nadtlb_miss_11:
 	mtsp		t1, %sr1	/* Restore sr1 */
-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
 	rfir
 	nop
@@ -1285,7 +1287,7 @@ dtlb_miss_20:
 	idtlbt		pte,prot
-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
 	rfir
 	nop
@@ -1313,7 +1315,7 @@ nadtlb_miss_20:
 	idtlbt		pte,prot
-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
 	rfir
 	nop
@@ -1420,7 +1422,7 @@ itlb_miss_20w:
 	iitlbt		pte,prot
-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
 	rfir
 	nop
@@ -1444,7 +1446,7 @@ naitlb_miss_20w:
 	iitlbt		pte,prot
-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
 	rfir
 	nop
@@ -1478,7 +1480,7 @@ itlb_miss_11:
 	mtsp		t1, %sr1	/* Restore sr1 */
-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
 	rfir
 	nop
@@ -1502,7 +1504,7 @@ naitlb_miss_11:
 	mtsp		t1, %sr1	/* Restore sr1 */
-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
 	rfir
 	nop
@@ -1532,7 +1534,7 @@ itlb_miss_20:
 	iitlbt		pte,prot
-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
 	rfir
 	nop
@@ -1552,7 +1554,7 @@ naitlb_miss_20:
 	iitlbt		pte,prot
-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
 	rfir
 	nop
@@ -1582,7 +1584,7 @@ dbit_trap_20w:
 	idtlbt		pte,prot
-	tlb_unlock0	spc,t0,t1
+	tlb_unlock0	spc,t0
 	rfir
 	nop
 #else
@@ -1608,7 +1610,7 @@ dbit_trap_11:
 	mtsp		t1, %sr1	/* Restore sr1 */
-	tlb_unlock0	spc,t0,t1
+	tlb_unlock0	spc,t0
 	rfir
 	nop
@@ -1628,7 +1630,7 @@ dbit_trap_20:
 	idtlbt		pte,prot
-	tlb_unlock0	spc,t0,t1
+	tlb_unlock0	spc,t0
 	rfir
 	nop
 #endif
...
@@ -18,6 +18,8 @@
 #include <linux/kthread.h>
 #include <linux/initrd.h>
 #include <linux/pgtable.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
 
 #include <asm/pdc.h>
 #include <asm/pdcpat.h>
@@ -230,6 +232,7 @@ void __init pdc_pdt_init(void)
 		/* mark memory page bad */
 		memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE);
+		num_poisoned_pages_inc();
 	}
 }
...
@@ -640,11 +640,7 @@ cas_action:
 	sub,<>	%r28, %r25, %r0
 2:	stw	%r24, 0(%r26)
 	/* Free lock */
-#ifdef CONFIG_SMP
-98:	LDCW	0(%sr2,%r20), %r1			/* Barrier */
-99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-#endif
-	stw	%r20, 0(%sr2,%r20)
+	stw,ma	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
 	/* Clear thread register indicator */
 	stw	%r0, 4(%sr2,%r20)
@@ -658,11 +654,7 @@ cas_action:
 3:
 	/* Error occurred on load or store */
 	/* Free lock */
-#ifdef CONFIG_SMP
-98:	LDCW	0(%sr2,%r20), %r1			/* Barrier */
-99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-#endif
-	stw	%r20, 0(%sr2,%r20)
+	stw,ma	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
 	stw	%r0, 4(%sr2,%r20)
 #endif
@@ -863,11 +855,7 @@ cas2_action:
 cas2_end:
 	/* Free lock */
-#ifdef CONFIG_SMP
-98:	LDCW	0(%sr2,%r20), %r1			/* Barrier */
-99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-#endif
-	stw	%r20, 0(%sr2,%r20)
+	stw,ma	%r20, 0(%sr2,%r20)
 	/* Enable interrupts */
 	ssm	PSW_SM_I, %r0
 	/* Return to userspace, set no error */
@@ -877,11 +865,7 @@ cas2_end:
 22:
 	/* Error occurred on load or store */
 	/* Free lock */
-#ifdef CONFIG_SMP
-98:	LDCW	0(%sr2,%r20), %r1			/* Barrier */
-99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-#endif
-	stw	%r20, 0(%sr2,%r20)
+	stw,ma	%r20, 0(%sr2,%r20)
 	ssm	PSW_SM_I, %r0
 	ldo	1(%r0),%r28
 	b	lws_exit
...
@@ -75,7 +75,7 @@ static int printbinary(char *buf, unsigned long x, int nbits)
 	lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],	\
 	(r)[(x)+2], (r)[(x)+3])
 
-static void print_gr(char *level, struct pt_regs *regs)
+static void print_gr(const char *level, struct pt_regs *regs)
 {
 	int i;
 	char buf[64];
@@ -89,7 +89,7 @@ static void print_gr(char *level, struct pt_regs *regs)
 		PRINTREGS(level, regs->gr, "r", RFMT, i);
 }
 
-static void print_fr(char *level, struct pt_regs *regs)
+static void print_fr(const char *level, struct pt_regs *regs)
 {
 	int i;
 	char buf[64];
@@ -119,7 +119,7 @@ static void print_fr(char *level, struct pt_regs *regs)
 void show_regs(struct pt_regs *regs)
 {
 	int i, user;
-	char *level;
+	const char *level;
 	unsigned long cr30, cr31;
 
 	user = user_mode(regs);
...
@@ -750,7 +750,7 @@ unsigned long alloc_sid(void)
 		free_space_ids--;
 
 	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
-	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
+	space_id[BIT_WORD(index)] |= BIT_MASK(index);
 	space_id_index = index;
 
 	spin_unlock(&sid_lock);
@@ -761,16 +761,16 @@ unsigned long alloc_sid(void)
 void free_sid(unsigned long spaceid)
 {
 	unsigned long index = spaceid >> SPACEID_SHIFT;
-	unsigned long *dirty_space_offset;
+	unsigned long *dirty_space_offset, mask;
 
-	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
-	index &= (BITS_PER_LONG - 1);
+	dirty_space_offset = &dirty_space_id[BIT_WORD(index)];
+	mask = BIT_MASK(index);
 
 	spin_lock(&sid_lock);
 
-	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */
+	BUG_ON(*dirty_space_offset & mask); /* attempt to free space id twice */
 
-	*dirty_space_offset |= (1L << index);
+	*dirty_space_offset |= mask;
 	dirty_space_ids++;
 
 	spin_unlock(&sid_lock);
...