Commit d257b778 authored by Robert Love, committed by Dave Kleikamp

[PATCH] kernel preemption bits (1/2)

Two big ouches in x86 entry.S:

(1) Up until 2.5.20, we were not properly reading the irq_stat
    and bh_count values from the right CPU.  Brian Gerst sent you
    a patch to fix this.  This raises the question: why was this not
    a problem?  Seems we do not need this check at all, as having a
    nonzero irq_count or bh_count implies having a nonzero preempt_count,
    which we test for above.  Thus this patch removes those tests and
    the related defines.
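
    To make the redundancy argument concrete, here is a minimal C sketch
    (illustrative only; the helper names are made up and this is not
    kernel code) of the condition the entry path effectively tested
    before versus what it tests now:

        /* Old test: preemptible only if no preempt_count, no hard-irq
         * and no bh activity on this CPU. */
        static int may_preempt_old(int preempt_count, int irq_count, int bh_count)
        {
                return preempt_count == 0 && irq_count == 0 && bh_count == 0;
        }

        /* New test: irq_count != 0 or bh_count != 0 already implies
         * preempt_count != 0, so the extra terms add nothing. */
        static int may_preempt_new(int preempt_count)
        {
                return preempt_count == 0;
        }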

(2) What if it is possible to preempt even with interrupts disabled?
    Turns out it is.  Consider if we take an exception (say, for a
    TLB miss) and enter resume_kernel and preempt.  Even if interrupts
    are disabled, an exception can occur and end up in resume_kernel.
    We need to check to make sure interrupts are not off, to ensure
    we are not coming off an unmasked exception.
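
    As a hedged C rendering of the resulting resume_kernel decision (the
    real logic is the entry.S assembly in the diff below; the struct and
    function names here are hypothetical, and the _TIF_NEED_RESCHED bit
    value is illustrative only):

        #define IF_MASK           0x00000200    /* EFLAGS.IF */
        #define _TIF_NEED_RESCHED (1 << 3)      /* illustrative bit */

        struct saved_frame { unsigned long eflags; };  /* stand-in for the saved regs */
        struct task_state  { int preempt_count; unsigned long flags; };

        static int resume_kernel_may_preempt(const struct saved_frame *regs,
                                             const struct task_state *ti)
        {
                if (ti->preempt_count != 0)             /* non-preemptible region */
                        return 0;
                if (!(ti->flags & _TIF_NEED_RESCHED))   /* nothing wants the CPU */
                        return 0;
                if (!(regs->eflags & IF_MASK))          /* exception path, irqs off */
                        return 0;
                return 1;
        }

    Note that the IF bit is read from the saved EFLAGS on the stack
    (EFLAGS(%esp) in the assembly), i.e. the interrupted context's state,
    which is what distinguishes a normal interrupt return from an
    exception taken with interrupts disabled.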

Even with the added check from issue #2, we have less code after #1 so
we can walk away with a bugfix and an optimization here. ;)

Thanks to George Anzinger, who was there when #2 actually bit me and
who helped with these issues.
parent 826267d3
@@ -70,34 +70,15 @@ IF_MASK = 0x00000200
 NT_MASK = 0x00004000
 VM_MASK = 0x00020000
 
-/*
- * These are offsets into the irq_stat structure
- * There is one per cpu and it is aligned to 32
- * byte boundry (we put that here as a shift count)
- */
-irq_array_shift = CONFIG_X86_L1_CACHE_SHIFT
-
-local_irq_count = 4
-local_bh_count = 8
-
-#ifdef CONFIG_SMP
-#define GET_CPU_IDX \
-	movl TI_CPU(%ebx), %eax; \
-	shll $irq_array_shift, %eax
-#define CPU_IDX (,%eax)
-#else
-#define GET_CPU_IDX
-#define CPU_IDX
-#endif
-
 #ifdef CONFIG_PREEMPT
 #define preempt_stop cli
 #define INC_PRE_COUNT(reg) incl TI_PRE_COUNT(reg);
 #define DEC_PRE_COUNT(reg) decl TI_PRE_COUNT(reg);
 #else
 #define preempt_stop
 #define INC_PRE_COUNT(reg)
 #define DEC_PRE_COUNT(reg)
 #define resume_kernel restore_all
 #endif
 
 #define SAVE_ALL \
@@ -228,15 +209,13 @@ ENTRY(resume_userspace)
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
-	cmpl $0,TI_PRE_COUNT(%ebx)
+	cmpl $0,TI_PRE_COUNT(%ebx)	# non-zero preempt_count ?
 	jnz restore_all
-	movl TI_FLAGS(%ebx), %ecx
+	movl TI_FLAGS(%ebx), %ecx	# need_resched set ?
 	testb $_TIF_NEED_RESCHED, %cl
 	jz restore_all
-	GET_CPU_IDX
-	movl irq_stat+local_bh_count CPU_IDX, %ecx
-	addl irq_stat+local_irq_count CPU_IDX, %ecx
-	jnz restore_all
+	testl $IF_MASK,EFLAGS(%esp)	# interrupts off (exception path) ?
+	jz restore_all
 	movl $PREEMPT_ACTIVE,TI_PRE_COUNT(%ebx)
 	sti
 	call schedule