Commit c35bd557 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] sh: preempt fixes

From: Paul Mundt <lethal@linux-sh.org>

This fixes up a number of other outstanding preemption issues in the sh
backend (in addition to the ones already fixed in previous patches).

Patch from Kaz Kojima.
parent 41dd42aa
/* $Id: entry.S,v 1.33 2003/11/22 15:39:51 lethal Exp $ /* $Id: entry.S,v 1.34 2004/01/13 05:52:11 kkojima Exp $
* *
* linux/arch/sh/entry.S * linux/arch/sh/entry.S
* *
...@@ -351,18 +351,22 @@ ret_from_irq: ...@@ -351,18 +351,22 @@ ret_from_irq:
GET_THREAD_INFO(r8) GET_THREAD_INFO(r8)
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPT
bra resume_userspace
nop
ENTRY(resume_kernel) ENTRY(resume_kernel)
mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count
tst r0, r0 tst r0, r0
bf restore_all bf noresched
need_resched: need_resched:
mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
tst #_TIF_NEED_RESCHED, r0 ! need_resched set? tst #_TIF_NEED_RESCHED, r0 ! need_resched set?
bt restore_all bt noresched
stc sr, r0 ! interrupts disabled? mov #OFF_SR, r0
tst #0xf0, r0 mov.l @(r0,r15), r0 ! get status register
bf restore_all and #0xf0, r0 ! interrupts off (exception path)?
cmp/eq #0xf0, r0
bt noresched
mov.l 1f, r0 mov.l 1f, r0
mov.l r0, @(TI_PRE_COUNT,r8) mov.l r0, @(TI_PRE_COUNT,r8)
...@@ -377,6 +381,9 @@ need_resched: ...@@ -377,6 +381,9 @@ need_resched:
bra need_resched bra need_resched
nop nop
noresched:
bra restore_all
nop
.align 2 .align 2
1: .long PREEMPT_ACTIVE 1: .long PREEMPT_ACTIVE
...@@ -525,7 +532,7 @@ ENTRY(system_call) ...@@ -525,7 +532,7 @@ ENTRY(system_call)
! Is the trap argument >= 0x20? (TRA will be >= 0x80) ! Is the trap argument >= 0x20? (TRA will be >= 0x80)
mov #0x7f, r9 mov #0x7f, r9
cmp/hi r9, r8 cmp/hi r9, r8
bt/s debug_trap bt/s 0f
mov #OFF_TRA, r9 mov #OFF_TRA, r9
add r15, r9 add r15, r9
! !
...@@ -543,6 +550,10 @@ syscall_badsys: ! Bad syscall number ...@@ -543,6 +550,10 @@ syscall_badsys: ! Bad syscall number
bra resume_userspace bra resume_userspace
mov.l r0, @(OFF_R0,r15) ! Return value mov.l r0, @(OFF_R0,r15) ! Return value
! !
0:
bra debug_trap
nop
!
good_system_call: ! Good syscall number good_system_call: ! Good syscall number
mov.l @(TI_FLAGS,r8), r8 mov.l @(TI_FLAGS,r8), r8
mov #_TIF_SYSCALL_TRACE, r10 mov #_TIF_SYSCALL_TRACE, r10
...@@ -632,12 +643,8 @@ skip_restore: ...@@ -632,12 +643,8 @@ skip_restore:
! !
! Calculate new SR value ! Calculate new SR value
mov k3, k2 ! original SR value mov k3, k2 ! original SR value
mov.l 8f, k1
stc sr, k0
and k1, k0 ! Get current FD-bit
mov.l 9f, k1 mov.l 9f, k1
and k1, k2 ! Mask original SR value and k1, k2 ! Mask original SR value
or k0, k2 ! Inherit current FD-bit
! !
mov k3, k0 ! Calculate IMASK-bits mov k3, k0 ! Calculate IMASK-bits
shlr2 k0 shlr2 k0
...@@ -668,8 +675,7 @@ skip_restore: ...@@ -668,8 +675,7 @@ skip_restore:
4: .long do_syscall_trace 4: .long do_syscall_trace
5: .long 0x00001000 ! DSP 5: .long 0x00001000 ! DSP
7: .long 0x30000000 7: .long 0x30000000
8: .long 0x00008000 ! FD 9:
9: .long 0xffff7f0f ! ~(IMASK+FD)
__INV_IMASK: __INV_IMASK:
.long 0xffffff0f ! ~(IMASK) .long 0xffffff0f ! ~(IMASK)
...@@ -838,7 +844,7 @@ skip_save: ...@@ -838,7 +844,7 @@ skip_save:
.align 2 .align 2
1: .long 0x00001000 ! DSP=1 1: .long 0x00001000 ! DSP=1
2: .long 0x000000f0 ! FD=0, IMASK=15 2: .long 0x000080f0 ! FD=1, IMASK=15
3: .long 0xcfffffff ! RB=0, BL=0 3: .long 0xcfffffff ! RB=0, BL=0
4: .long exception_handling_table 4: .long exception_handling_table
......
/* $Id: irq.c,v 1.19 2004/01/10 01:25:32 lethal Exp $ /* $Id: irq.c,v 1.20 2004/01/13 05:52:11 kkojima Exp $
* *
* linux/arch/sh/kernel/irq.c * linux/arch/sh/kernel/irq.c
* *
...@@ -318,6 +318,16 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5, ...@@ -318,6 +318,16 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
irq_enter(); irq_enter();
#ifdef CONFIG_PREEMPT
/*
* At this point we're now about to actually call handlers,
* and interrupts might get reenabled during them... bump
* preempt_count to prevent any preemption while the handler
* called here is pending...
*/
preempt_disable();
#endif
/* Get IRQ number */ /* Get IRQ number */
asm volatile("stc r2_bank, %0\n\t" asm volatile("stc r2_bank, %0\n\t"
"shlr2 %0\n\t" "shlr2 %0\n\t"
...@@ -393,6 +403,15 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5, ...@@ -393,6 +403,15 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
irq_exit(); irq_exit();
#ifdef CONFIG_PREEMPT
/*
* We're done with the handlers, interrupts should be
* currently disabled; decrement preempt_count now so
* as we return preemption may be allowed...
*/
preempt_enable_no_resched();
#endif
return 1; return 1;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment