Commit f7e26c10 authored by Anton Blanchard, committed by Linus Torvalds

[PATCH] Move __preempt_*lock into kernel/spinlock.c, clean up.

 - create in_lock_functions() to match in_sched_functions(). Export it
   for use in oprofile.
 - use char __lock_text_start[] instead of long __lock_text_start when
   declaring linker symbols. Rusty fixed a number of these a while ago
   based on advice from rth.
 - Move __preempt_*_lock into kernel/spinlock.c and make it inline. This
   means the out-of-line lock functions are only one call deep.
 - Make in_sched_functions() check in_lock_functions().
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
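Editor's note: a condensed sketch of how the pieces below fit together. It is not part of the diff, and the arch-specific profile_ret_addr() helper is illustrative only; each architecture open-codes its own equivalent in the hunks that follow.

	/* Linker-provided section bounds. Declared char[] so only the symbol
	 * addresses are taken; a `long` declaration invites an accidental load. */
	extern char __lock_text_start[], __lock_text_end[];

	int in_lock_functions(unsigned long addr)
	{
		return addr >= (unsigned long)__lock_text_start
			&& addr < (unsigned long)__lock_text_end;
	}

	unsigned long profile_pc(struct pt_regs *regs)
	{
		unsigned long pc = instruction_pointer(regs);

		/* A sample that lands inside a spinlock routine is
		 * credited to the lock function's caller instead. */
		if (in_lock_functions(pc))
			return profile_ret_addr(regs);	/* hypothetical; e.g. regs->link on ppc */
		return pc;
	}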
parent cd31a496
@@ -57,8 +57,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 {
 	unsigned long fp, pc = instruction_pointer(regs);
 
-	if (pc >= (unsigned long)&__lock_text_start &&
-	    pc <= (unsigned long)&__lock_text_end) {
+	if (in_lock_functions(pc)) {
 		fp = thread_saved_fp(current);
 		pc = pc_pointer(((unsigned long *)fp)[-1]);
 	}
...
@@ -205,8 +205,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 {
 	unsigned long pc = instruction_pointer(regs);
 
-	if (pc >= (unsigned long)&__lock_text_start &&
-	    pc <= (unsigned long)&__lock_text_end)
+	if (in_lock_functions(pc))
 		return *(unsigned long *)(regs->ebp + 4);
 
 	return pc;
...
@@ -113,8 +113,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 {
 	unsigned long pc = instruction_pointer(regs);
 
-	if (pc >= (unsigned long)&__lock_text_start &&
-	    pc <= (unsigned long)&__lock_text_end)
+	if (in_lock_functions(pc))
 		return regs->link;
 
 	return pc;
...
@@ -163,8 +163,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 {
 	unsigned long pc = instruction_pointer(regs);
 
-	if (pc >= (unsigned long)&__lock_text_start &&
-	    pc <= (unsigned long)&__lock_text_end)
+	if (in_lock_functions(pc))
 		return regs->link;
 
 	return pc;
...
@@ -81,20 +81,22 @@ struct intersil *intersil_clock;
 
 unsigned long profile_pc(struct pt_regs *regs)
 {
-	extern int __copy_user_begin, __copy_user_end;
-	extern int __atomic_begin, __atomic_end;
-	extern int __bzero_begin, __bzero_end;
-	extern int __bitops_begin, __bitops_end;
+	extern char __copy_user_begin[], __copy_user_end[];
+	extern char __atomic_begin[], __atomic_end[];
+	extern char __bzero_begin[], __bzero_end[];
+	extern char __bitops_begin[], __bitops_end[];
 	unsigned long pc = regs->pc;
 
-	if ((pc >= (unsigned long) &__copy_user_begin &&
-	     pc < (unsigned long) &__copy_user_end) ||
-	    (pc >= (unsigned long) &__atomic_begin &&
-	     pc < (unsigned long) &__atomic_end) ||
-	    (pc >= (unsigned long) &__bzero_begin &&
-	     pc < (unsigned long) &__bzero_end) ||
-	    (pc >= (unsigned long) &__bitops_begin &&
-	     pc < (unsigned long) &__bitops_end))
+	if (in_lock_functions(pc) ||
+	    (pc >= (unsigned long) __copy_user_begin &&
+	     pc < (unsigned long) __copy_user_end) ||
+	    (pc >= (unsigned long) __atomic_begin &&
+	     pc < (unsigned long) __atomic_end) ||
+	    (pc >= (unsigned long) __bzero_begin &&
+	     pc < (unsigned long) __bzero_end) ||
+	    (pc >= (unsigned long) __bitops_begin &&
+	     pc < (unsigned long) __bitops_end))
 		pc = regs->u_regs[UREG_RETPC];
 	return pc;
 }
...
@@ -73,8 +73,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 {
 	unsigned long pc = instruction_pointer(regs);
 
-	if (pc >= (unsigned long)&__lock_text_start &&
-	    pc <= (unsigned long)&__lock_text_end)
+	if (in_lock_functions(pc))
 		return regs->u_regs[UREG_RETPC];
 	return pc;
 }
...
@@ -184,8 +184,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 {
 	unsigned long pc = instruction_pointer(regs);
 
-	if (pc >= (unsigned long)&__lock_text_start &&
-	    pc <= (unsigned long)&__lock_text_end)
+	if (in_lock_functions(pc))
 		return *(unsigned long *)regs->rbp;
 	return pc;
 }
...
@@ -68,11 +68,11 @@ void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
 void __lockfunc _write_unlock_irq(rwlock_t *lock);
 void __lockfunc _write_unlock_bh(rwlock_t *lock);
 int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-
-extern unsigned long __lock_text_start;
-extern unsigned long __lock_text_end;
+int in_lock_functions(unsigned long addr);
 
 #else
 
+#define in_lock_functions(ADDR) 0
+
 #if !defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_SPINLOCK)
 # define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
 # define ATOMIC_DEC_AND_LOCK
@@ -405,11 +405,6 @@ do { \
 
 /* Where's read_trylock? */
 
-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
-void __preempt_spin_lock(spinlock_t *lock);
-void __preempt_write_lock(rwlock_t *lock);
-#endif
-
 #define spin_lock(lock) _spin_lock(lock)
 #define write_lock(lock) _write_lock(lock)
 #define read_lock(lock) _read_lock(lock)
...
@@ -4654,8 +4654,9 @@ int in_sched_functions(unsigned long addr)
 {
 	/* Linker adds these: start and end of __sched functions */
 	extern char __sched_text_start[], __sched_text_end[];
-	return addr >= (unsigned long)__sched_text_start
-		&& addr < (unsigned long)__sched_text_end;
+	return in_lock_functions(addr) ||
+		(addr >= (unsigned long)__sched_text_start
+		&& addr < (unsigned long)__sched_text_end);
 }
 
 void __init sched_init(void)
@@ -4747,49 +4748,3 @@ void __might_sleep(char *file, int line)
 }
 EXPORT_SYMBOL(__might_sleep);
 #endif
-
-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
-/*
- * This could be a long-held lock. If another CPU holds it for a long time,
- * and that CPU is not asked to reschedule then *this* CPU will spin on the
- * lock for a long time, even if *this* CPU is asked to reschedule.
- *
- * So what we do here, in the slow (contended) path is to spin on the lock by
- * hand while permitting preemption.
- *
- * Called inside preempt_disable().
- */
-void __sched __preempt_spin_lock(spinlock_t *lock)
-{
-	if (preempt_count() > 1) {
-		_raw_spin_lock(lock);
-		return;
-	}
-
-	do {
-		preempt_enable();
-		while (spin_is_locked(lock))
-			cpu_relax();
-		preempt_disable();
-	} while (!_raw_spin_trylock(lock));
-}
-
-EXPORT_SYMBOL(__preempt_spin_lock);
-
-void __sched __preempt_write_lock(rwlock_t *lock)
-{
-	if (preempt_count() > 1) {
-		_raw_write_lock(lock);
-		return;
-	}
-
-	do {
-		preempt_enable();
-		while (rwlock_is_locked(lock))
-			cpu_relax();
-		preempt_disable();
-	} while (!_raw_write_trylock(lock));
-}
-
-EXPORT_SYMBOL(__preempt_write_lock);
-#endif /* defined(CONFIG_SMP) && defined(CONFIG_PREEMPT) */
...
@@ -33,7 +33,32 @@ int __lockfunc _write_trylock(rwlock_t *lock)
 }
 EXPORT_SYMBOL(_write_trylock);
 
-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
+#ifdef CONFIG_PREEMPT
+/*
+ * This could be a long-held lock. If another CPU holds it for a long time,
+ * and that CPU is not asked to reschedule then *this* CPU will spin on the
+ * lock for a long time, even if *this* CPU is asked to reschedule.
+ *
+ * So what we do here, in the slow (contended) path is to spin on the lock by
+ * hand while permitting preemption.
+ *
+ * Called inside preempt_disable().
+ */
+static inline void __preempt_spin_lock(spinlock_t *lock)
+{
+	if (preempt_count() > 1) {
+		_raw_spin_lock(lock);
+		return;
+	}
+
+	do {
+		preempt_enable();
+		while (spin_is_locked(lock))
+			cpu_relax();
+		preempt_disable();
+	} while (!_raw_spin_trylock(lock));
+}
+
 void __lockfunc _spin_lock(spinlock_t *lock)
 {
 	preempt_disable();
@@ -41,6 +66,21 @@ void __lockfunc _spin_lock(spinlock_t *lock)
 		__preempt_spin_lock(lock);
 }
 
+static inline void __preempt_write_lock(rwlock_t *lock)
+{
+	if (preempt_count() > 1) {
+		_raw_write_lock(lock);
+		return;
+	}
+
+	do {
+		preempt_enable();
+		while (rwlock_is_locked(lock))
+			cpu_relax();
+		preempt_disable();
+	} while (!_raw_write_trylock(lock));
+}
+
 void __lockfunc _write_lock(rwlock_t *lock)
 {
 	preempt_disable();
@@ -256,3 +296,13 @@ int __lockfunc _spin_trylock_bh(spinlock_t *lock)
 	return 0;
 }
 EXPORT_SYMBOL(_spin_trylock_bh);
+
+int in_lock_functions(unsigned long addr)
+{
+	/* Linker adds these: start and end of __lockfunc functions */
+	extern char __lock_text_start[], __lock_text_end[];
+
+	return addr >= (unsigned long)__lock_text_start
+	&& addr < (unsigned long)__lock_text_end;
+}
+EXPORT_SYMBOL(in_lock_functions);
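
Editor's note: the EXPORT_SYMBOL above is what lets oprofile, which can be built as a module, apply the same filtering to its samples. A hypothetical module-side use (the helper name is illustrative, not from this commit):

	#include <linux/spinlock.h>

	/* Attribute time spent in the stock lock functions to their caller. */
	static unsigned long fixup_sample(unsigned long pc, unsigned long caller_pc)
	{
		if (in_lock_functions(pc))
			return caller_pc;
		return pc;
	}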