Commit 0ed4dd24 authored by Ingo Molnar

- rq-lock optimization in the preemption case, from Robert Love, plus some more cleanups.

parent 2b75b535
@@ -157,6 +157,12 @@ do { \
 	preempt_enable(); \
 } while (0)
 
+#define spin_unlock_no_resched(lock) \
+do { \
+	_raw_spin_unlock(lock); \
+	preempt_enable_no_resched(); \
+} while (0)
+
 #define read_lock(lock)		({preempt_disable(); _raw_read_lock(lock);})
 #define read_unlock(lock)	({_raw_read_unlock(lock); preempt_enable();})
 #define write_lock(lock)	({preempt_disable(); _raw_write_lock(lock);})
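The point of the new variant: with CONFIG_PREEMPT, spin_unlock() ends in preempt_enable(), which may call straight into the scheduler the moment the preempt count reaches zero. A caller that is about to call schedule() anyway wants to drop the lock without paying for that check; on !CONFIG_PREEMPT kernels both flavors collapse to _raw_spin_unlock(), as the next hunk shows. A rough behavioral model of the two enable forms (illustrative only, not the kernel's actual definitions):

static int preempt_count;		/* per-task in the real kernel */
static int need_resched;		/* set when a reschedule is pending */

void preempt_schedule(void);		/* the kernel's preemption entry point */

static void model_preempt_enable_no_resched(void)
{
	--preempt_count;		/* re-allow preemption, no resched check */
}

static void model_preempt_enable(void)
{
	--preempt_count;
	if (!preempt_count && need_resched)
		preempt_schedule();	/* may context-switch right here */
}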
@@ -166,20 +172,21 @@ do { \
 #else
 
 #define preempt_get_count()	(0)
 #define preempt_disable()	do { } while (0)
 #define preempt_enable_no_resched()	do {} while(0)
 #define preempt_enable()	do { } while (0)
 
 #define spin_lock(lock)		_raw_spin_lock(lock)
 #define spin_trylock(lock)	_raw_spin_trylock(lock)
 #define spin_unlock(lock)	_raw_spin_unlock(lock)
+#define spin_unlock_no_resched(lock)	_raw_spin_unlock(lock)
 
 #define read_lock(lock)		_raw_read_lock(lock)
 #define read_unlock(lock)	_raw_read_unlock(lock)
 #define write_lock(lock)	_raw_write_lock(lock)
 #define write_unlock(lock)	_raw_write_unlock(lock)
 #define write_trylock(lock)	_raw_write_trylock(lock)
 #endif
 
 /* "lock on reference count zero" */
...
@@ -152,17 +152,21 @@ static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define rt_task(p)		((p)->prio < MAX_RT_PRIO)
 
+/*
+ * task_rq_lock - lock the runqueue a given task resides on and disable
+ * interrupts. Note the ordering: we can safely lookup the task_rq without
+ * explicitly disabling preemption.
+ */
 static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 {
 	struct runqueue *rq;
 
 repeat_lock_task:
-	preempt_disable();
+	local_irq_save(*flags);
 	rq = task_rq(p);
-	spin_lock_irqsave(&rq->lock, *flags);
+	spin_lock(&rq->lock);
 	if (unlikely(rq != task_rq(p))) {
 		spin_unlock_irqrestore(&rq->lock, *flags);
-		preempt_enable();
 		goto repeat_lock_task;
 	}
 	return rq;
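The retry loop above is a lock-then-validate pattern: task_rq(p) is read with no lock held, so another CPU can migrate p to a different runqueue before spin_lock() succeeds. Re-checking after acquisition, and looping on a mismatch, guarantees that the lock we end up holding is the one that actually covers p. This is also why the explicit preempt_disable() could go: local_irq_save() already prevents preemption on this CPU for the window that matters. A generic sketch of the pattern, using hypothetical object/lock types rather than the kernel's:

/* Hypothetical types - illustrates the pattern, not kernel code. */
struct object {
	spinlock_t *home;	/* which lock currently covers this object */
};

static spinlock_t *object_lock(struct object *o)
{
	spinlock_t *l;
repeat:
	l = o->home;			/* unsynchronized lookup */
	spin_lock(l);
	if (unlikely(o->home != l)) {	/* object moved before we got the lock */
		spin_unlock(l);
		goto repeat;
	}
	return l;			/* o->home is stable while we hold its lock */
}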
@@ -171,7 +175,23 @@ static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
 {
 	spin_unlock_irqrestore(&rq->lock, *flags);
-	preempt_enable();
 }
 
+/*
+ * rq_lock - lock a given runqueue and disable interrupts.
+ */
+static inline runqueue_t *rq_lock(runqueue_t *rq)
+{
+	local_irq_disable();
+	rq = this_rq();
+	spin_lock(&rq->lock);
+	return rq;
+}
+
+static inline void rq_unlock(runqueue_t *rq)
+{
+	spin_unlock(&rq->lock);
+	local_irq_enable();
+}
+
 /*
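Unlike task_rq_lock(), rq_lock() needs no retry loop: local_irq_disable() shuts off preemption on this CPU before this_rq() is read, so the current runqueue cannot change underneath us. Note the slightly odd calling convention: the rq argument is never read, only overwritten with this_rq(), which is why the callers below use the idiom shown here. A hedged usage sketch:

	runqueue_t *rq;

	rq = rq_lock(rq);	/* argument is ignored; returns this_rq() with irqs off */
	/* ... operate on the local runqueue ... */
	rq_unlock(rq);		/* drop the lock and re-enable interrupts */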
@@ -364,9 +384,7 @@ void wake_up_forked_process(task_t * p)
 {
 	runqueue_t *rq;
 
-	preempt_disable();
-	rq = this_rq();
-	spin_lock_irq(&rq->lock);
+	rq = rq_lock(rq);
 
 	p->state = TASK_RUNNING;
 	if (!rt_task(p)) {
@@ -382,8 +400,7 @@ void wake_up_forked_process(task_t * p)
 	p->thread_info->cpu = smp_processor_id();
 	activate_task(p, rq);
 
-	spin_unlock_irq(&rq->lock);
-	preempt_enable();
+	rq_unlock(rq);
 }
 
 /*
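The conversion is not only shorter; it preserves the invariant the old code relied on. Because rq_lock() keeps interrupts (and therefore preemption) off until rq_unlock(), smp_processor_id() cannot change mid-section, so the child really is queued on the runqueue that was just locked. The same lines, annotated (the comments are mine, not in the patch):

	rq = rq_lock(rq);				/* irqs off: CPU is pinned */
	...
	p->thread_info->cpu = smp_processor_id();	/* stable while irqs are off */
	activate_task(p, rq);				/* rq is this CPU's runqueue */
	rq_unlock(rq);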
@@ -1367,8 +1384,7 @@ asmlinkage long sys_sched_yield(void)
 	runqueue_t *rq;
 	prio_array_t *array;
 
-	preempt_disable();
-	rq = this_rq();
+	rq = rq_lock(rq);
 
 	/*
 	 * Decrease the yielding task's priority by one, to avoid
@@ -1378,7 +1394,6 @@ asmlinkage long sys_sched_yield(void)
 	 * If priority is already MAX_PRIO-1 then we still
 	 * roundrobin the task within the runlist.
 	 */
-	spin_lock_irq(&rq->lock);
 	array = current->array;
 	/*
 	 * If the task has reached maximum priority (or is a RT task)
@@ -1395,8 +1410,7 @@ asmlinkage long sys_sched_yield(void)
 		list_add_tail(&current->run_list, array->queue + current->prio);
 		__set_bit(current->prio, array->bitmap);
 	}
-	spin_unlock(&rq->lock);
-	preempt_enable_no_resched();
+	spin_unlock_no_resched(&rq->lock);
 
 	schedule();
...
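This hunk is the optimization the changelog refers to: the old path nested an extra preempt_disable()/preempt_enable() pair around the runqueue lock, and the new path also skips the resched check when the lock is dropped, because sys_sched_yield() invokes schedule() itself on the very next line; entering the scheduler from the preemption check and then again from the explicit call would be pure overhead. The resulting shape, roughly:

	rq = rq_lock(rq);			/* lock this CPU's runqueue, irqs off */
	/* ... requeue current at the tail of its priority list ... */
	spin_unlock_no_resched(&rq->lock);	/* drop the lock, skip the preempt check */
	schedule();				/* reschedule explicitly instead */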