Commit 3b4beb31 authored by Heiko Carstens, committed by Martin Schwidefsky

[S390] Remove owner_pc member from raw_spinlock_t.

Used to contain the address of the holder of the lock. But since the
spinlock code is not inlined anymore, all locks contain the same address
anyway. And since, in addition, nobody has complained about that for ages,
it's obviously unused. So remove it.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 9f4b0ba8
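A note on the lock-word convention visible throughout the diff below: owner_cpu holds the bitwise complement of the holder's CPU number (cpu = ~smp_processor_id()), so 0 always means "unlocked" and every acquire path does a compare-and-swap against 0. The following is a minimal user-space sketch of that encoding only; model_spinlock_t, encode_owner and decode_owner are illustrative names, not the kernel's.

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for the post-patch raw_spinlock_t: one word only. */
typedef struct {
	volatile unsigned int owner_cpu;
} model_spinlock_t;

/* 0 is reserved for "free"; ~cpu is non-zero even for CPU 0 (0xffffffff). */
static unsigned int encode_owner(unsigned int cpu)
{
	return ~cpu;
}

static unsigned int decode_owner(unsigned int word)
{
	return ~word;
}

int main(void)
{
	model_spinlock_t lp = { 0 };		/* matches __RAW_SPIN_LOCK_UNLOCKED */

	lp.owner_cpu = encode_owner(0);
	assert(lp.owner_cpu != 0);		/* still distinguishable from "free" */
	printf("held by cpu %u\n", decode_owner(lp.owner_cpu));

	lp.owner_cpu = 0;			/* release: back to the unlocked value */
	return 0;
}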
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
 		_raw_yield();
 }
 
-void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
+void _raw_spin_lock_wait(raw_spinlock_t *lp)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
@@ -53,15 +53,13 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
 		}
 		if (__raw_spin_is_locked(lp))
 			continue;
-		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
-			lp->owner_pc = pc;
+		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
-		}
 	}
 }
 EXPORT_SYMBOL(_raw_spin_lock_wait);
 
-int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
+int _raw_spin_trylock_retry(raw_spinlock_t *lp)
 {
 	unsigned int cpu = ~smp_processor_id();
 	int count;
@@ -69,10 +67,8 @@ int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
 	for (count = spin_retry; count > 0; count--) {
 		if (__raw_spin_is_locked(lp))
 			continue;
-		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
-			lp->owner_pc = pc;
+		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return 1;
-		}
 	}
 	return 0;
 }
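For readers without the surrounding file, this is roughly what the post-patch retry loop boils down to: a winning compare-and-swap is now the whole acquire step, with no owner_pc bookkeeping behind it. A user-space sketch, assuming GCC's __sync builtins in place of the s390 _raw_compare_and_swap helper; model_trylock_retry, model_spinlock_t and the spin_retry value are illustrative, not the kernel's.

#include <stdio.h>

typedef struct {
	volatile unsigned int owner_cpu;
} model_spinlock_t;

static int spin_retry = 1000;			/* arbitrary retry budget for the sketch */

static int model_trylock_retry(model_spinlock_t *lp, unsigned int cpu)
{
	int count;

	for (count = spin_retry; count > 0; count--) {
		if (lp->owner_cpu != 0)		/* stand-in for __raw_spin_is_locked() */
			continue;
		/* Successful CAS from 0 to ~cpu takes the lock; nothing else
		 * is recorded now that owner_pc is gone. */
		if (__sync_val_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return 1;
	}
	return 0;
}

int main(void)
{
	model_spinlock_t lp = { 0 };
	unsigned int cpu = ~0u;			/* ~smp_processor_id() for CPU 0 */

	printf("first attempt:  %d\n", model_trylock_retry(&lp, cpu));	/* 1: taken */
	printf("second attempt: %d\n", model_trylock_retry(&lp, cpu));	/* 0: still held */
	return 0;
}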
@@ -58,39 +58,32 @@ _raw_compare_and_swap(volatile unsigned int *lock,
 	do { while (__raw_spin_is_locked(lock)) \
 		 _raw_spin_relax(lock); } while (0)
 
-extern void _raw_spin_lock_wait(raw_spinlock_t *, unsigned int pc);
-extern int _raw_spin_trylock_retry(raw_spinlock_t *, unsigned int pc);
+extern void _raw_spin_lock_wait(raw_spinlock_t *);
+extern int _raw_spin_trylock_retry(raw_spinlock_t *);
 extern void _raw_spin_relax(raw_spinlock_t *lock);
 
 static inline void __raw_spin_lock(raw_spinlock_t *lp)
 {
-	unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
 	int old;
 
 	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0)) {
-		lp->owner_pc = pc;
+	if (likely(old == 0))
 		return;
-	}
-	_raw_spin_lock_wait(lp, pc);
+	_raw_spin_lock_wait(lp);
 }
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lp)
 {
-	unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
 	int old;
 
 	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0)) {
-		lp->owner_pc = pc;
+	if (likely(old == 0))
 		return 1;
-	}
-	return _raw_spin_trylock_retry(lp, pc);
+	return _raw_spin_trylock_retry(lp);
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lp)
 {
-	lp->owner_pc = 0;
 	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
 }
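The header keeps the same fast-path/slow-path split, just without the owner_pc bookkeeping: one inline compare-and-swap for the uncontended case, an out-of-line wait loop for contention, and an unlock that only clears owner_cpu. Below is a user-space sketch of that structure, again with GCC's __sync builtins standing in for _raw_compare_and_swap and with made-up model_* names; the busy-wait slow path is a simplification of _raw_spin_lock_wait().

#include <stdio.h>

typedef struct {
	volatile unsigned int owner_cpu;
} model_spinlock_t;

/* Out-of-line slow path: keep retrying the CAS from 0 until it succeeds. */
static __attribute__((noinline)) void model_lock_wait(model_spinlock_t *lp,
						      unsigned int cpu)
{
	while (__sync_val_compare_and_swap(&lp->owner_cpu, 0, cpu) != 0)
		;	/* the kernel slow path yields/relaxes instead of pure spinning */
}

static inline void model_lock(model_spinlock_t *lp, unsigned int cpu)
{
	/* Fast path: one CAS and, after this patch, nothing else to record. */
	if (__sync_val_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
		return;
	model_lock_wait(lp, cpu);
}

static inline void model_unlock(model_spinlock_t *lp)
{
	/* Unlock is now just "write 0 back"; no owner_pc to reset first. */
	__sync_val_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}

int main(void)
{
	model_spinlock_t lp = { 0 };
	unsigned int cpu = ~0u;

	model_lock(&lp, cpu);
	printf("locked,   owner word = %#x\n", lp.owner_cpu);
	model_unlock(&lp);
	printf("unlocked, owner word = %#x\n", lp.owner_cpu);
	return 0;
}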
@@ -7,7 +7,6 @@
 
 typedef struct {
 	volatile unsigned int owner_cpu;
-	volatile unsigned int owner_pc;
 } __attribute__ ((aligned (4))) raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 0 }