Commit 8b2cd421 authored by Russell King

[ARM] Enable IRQs over context switches

Disabling IRQs over context switches causes unreasonable IRQ latency
on VIVT cached machines, so provide our own prepare_arch_switch(),
finish_arch_switch() and task_running() implementations.  We provide
an optimised UP version so we don't end up needlessly incrementing
and decrementing the preempt count.
parent fe9f64b3
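
For background: when an architecture does not override these hooks, the generic scheduler of this era falls back to defaults in kernel/sched.c which keep interrupts disabled until the runqueue lock is dropped after the switch has completed. The snippet below is an approximate reconstruction of those defaults (not part of this patch), shown only to illustrate what the ARM definitions replace:

/*
 * Approximate generic fallback from kernel/sched.c of this period.
 * IRQs are only re-enabled by finish_arch_switch(), i.e. after
 * switch_mm() and switch_to() have run -- and on a VIVT cache
 * switch_mm() may flush the entire cache with interrupts off.
 */
#ifndef prepare_arch_switch
# define prepare_arch_switch(rq, next)	do { } while (0)
# define finish_arch_switch(rq, prev)	spin_unlock_irq(&(rq)->lock)
# define task_running(rq, p)		((rq)->curr == (p))
#endif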
@@ -137,22 +137,48 @@ extern unsigned int user_debug;
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
-#define prepare_to_switch()	do { } while(0)
+#ifdef CONFIG_SMP
+/*
+ * Define our own context switch locking.  This allows us to enable
+ * interrupts over the context switch, otherwise we end up with high
+ * interrupt latency.  The real problem area is switch_mm() which may
+ * do a full cache flush.
+ */
+#define prepare_arch_switch(rq,next)					\
+do {									\
+	spin_lock(&(next)->switch_lock);				\
+	spin_unlock_irq(&(rq)->lock);					\
+} while (0)
+
+#define finish_arch_switch(rq,prev)					\
+	spin_unlock(&(prev)->switch_lock)
+
+#define task_running(rq,p)						\
+	((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
+#else
+/*
+ * Our UP-case is more simple, but we assume knowledge of how
+ * spin_unlock_irq() and friends are implemented.  This avoids
+ * us needlessly decrementing and incrementing the preempt count.
+ */
+#define prepare_arch_switch(rq,next)	local_irq_enable()
+#define finish_arch_switch(rq,prev)	spin_unlock(&(rq)->lock)
+#define task_running(rq,p)		((rq)->curr == (p))
+#endif
 
 /*
  * switch_to(prev, next) should switch from task `prev' to `next'
- * `prev' will never be the same as `next'.
- * The `mb' is to tell GCC not to cache `current' across this call.
+ * `prev' will never be the same as `next'.  schedule() itself
+ * contains the memory barrier to tell GCC not to cache `current'.
  */
 struct thread_info;
 struct task_struct;
 extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);
 
 #define switch_to(prev,next,last)					\
 	do {								\
 		last = __switch_to(prev,prev->thread_info,next->thread_info);	\
-		mb();							\
 	} while (0)
 
 /*
  * CPU interrupt mask handling.
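
Roughly, the scheduler core brackets the low-level switch with these hooks as sketched below (a simplified reconstruction of the 2.6-era schedule() path; context_switch() and finish_task_switch() follow kernel/sched.c of the time, details omitted):

	/* in schedule(), entered with rq->lock held and IRQs disabled */
	prepare_arch_switch(rq, next);		/* ARM SMP: take next->switch_lock,
						   then drop rq->lock and enable IRQs */
	prev = context_switch(rq, prev, next);	/* switch_mm() + switch_to(); the
						   possible full cache flush now runs
						   with interrupts enabled */
	barrier();
	finish_task_switch(prev);		/* ends in finish_arch_switch(), which
						   releases prev->switch_lock */

The switch_lock handover is what makes running the switch with IRQs enabled safe on SMP: task_running() reports the outgoing task as still running until finish_arch_switch() releases its switch_lock, so another CPU cannot pick it up while its state is only half saved.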