Commit 371fd7e7 authored by Peter Zijlstra, committed by Ingo Molnar

sched: Add enqueue/dequeue flags

In order to reduce the dependency on TASK_WAKING, rework the enqueue
interface to support a proper flags field.

Replace the int wakeup, bool head arguments with an int flags argument
and create the following flags:

  ENQUEUE_WAKEUP - the enqueue is a wakeup of a sleeping task,
  ENQUEUE_WAKING - the enqueue has relative vruntime due to
                   having sched_class::task_waking() called,
  ENQUEUE_HEAD - the waking task should be placed on the head
                 of the priority queue (where appropriate).

For symmetry also convert sched_class::dequeue() to a flags scheme.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent cc87f76a
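For illustration only (not part of the commit): a minimal, standalone sketch of how the new flag bits are meant to be combined by callers and tested by a scheduling class. struct demo_entity, demo_enqueue() and main() are hypothetical stand-ins; only the flag values and the ENQUEUE_WAKING renormalization check mirror the hunks below.

#include <stdio.h>

#define ENQUEUE_WAKEUP  1       /* enqueue is a wakeup of a sleeping task */
#define ENQUEUE_WAKING  2       /* vruntime is relative; task_waking() already ran */
#define ENQUEUE_HEAD    4       /* place the task at the head of its queue */

#define DEQUEUE_SLEEP   1       /* the task is going to sleep */

/* Hypothetical stand-in for a scheduling entity; not a kernel structure. */
struct demo_entity {
        long vruntime;          /* stand-in for se->vruntime */
        long min_vruntime;      /* stand-in for cfs_rq->min_vruntime */
};

/* Hypothetical enqueue path mirroring the flag tests added by this commit. */
static void demo_enqueue(struct demo_entity *se, int flags)
{
        /*
         * Same check as the patched enqueue_entity(): add min_vruntime back
         * unless this is a wakeup without ENQUEUE_WAKING, whose vruntime is
         * already absolute.  ENQUEUE_WAKING marks a relative vruntime set up
         * by sched_class::task_waking().
         */
        if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
                se->vruntime += se->min_vruntime;

        if (flags & ENQUEUE_HEAD)
                printf("queued at head, vruntime=%ld\n", se->vruntime);
        else
                printf("queued at tail, vruntime=%ld\n", se->vruntime);
}

int main(void)
{
        struct demo_entity se = { .vruntime = 5, .min_vruntime = 100 };

        /* try_to_wake_up() style: a wakeup after task_waking() has run. */
        demo_enqueue(&se, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
        return 0;
}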
include/linux/sched.h
@@ -1032,12 +1032,17 @@ struct sched_domain;
 #define WF_SYNC         0x01            /* waker goes to sleep after wakup */
 #define WF_FORK         0x02            /* child wakeup after fork */
 
+#define ENQUEUE_WAKEUP          1
+#define ENQUEUE_WAKING          2
+#define ENQUEUE_HEAD            4
+
+#define DEQUEUE_SLEEP           1
+
 struct sched_class {
         const struct sched_class *next;
 
-        void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
-                              bool head);
-        void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
+        void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
+        void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
         void (*yield_task) (struct rq *rq);
 
         void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
...
kernel/sched.c
@@ -1877,44 +1877,43 @@ static void update_avg(u64 *avg, u64 sample)
         *avg += diff >> 3;
 }
 
-static void
-enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 {
         update_rq_clock(rq);
         sched_info_queued(p);
-        p->sched_class->enqueue_task(rq, p, wakeup, head);
+        p->sched_class->enqueue_task(rq, p, flags);
         p->se.on_rq = 1;
 }
 
-static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
+static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 {
         update_rq_clock(rq);
         sched_info_dequeued(p);
-        p->sched_class->dequeue_task(rq, p, sleep);
+        p->sched_class->dequeue_task(rq, p, flags);
         p->se.on_rq = 0;
 }
 
 /*
  * activate_task - move a task to the runqueue.
  */
-static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
+static void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
         if (task_contributes_to_load(p))
                 rq->nr_uninterruptible--;
 
-        enqueue_task(rq, p, wakeup, false);
+        enqueue_task(rq, p, flags);
         inc_nr_running(rq);
 }
 
 /*
  * deactivate_task - remove a task from the runqueue.
  */
-static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
+static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
         if (task_contributes_to_load(p))
                 rq->nr_uninterruptible++;
 
-        dequeue_task(rq, p, sleep);
+        dequeue_task(rq, p, flags);
         dec_nr_running(rq);
 }
 
@@ -2353,6 +2352,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 {
         int cpu, orig_cpu, this_cpu, success = 0;
         unsigned long flags;
+        unsigned long en_flags = ENQUEUE_WAKEUP;
         struct rq *rq;
 
         this_cpu = get_cpu();
@@ -2386,8 +2386,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
         }
         p->state = TASK_WAKING;
 
-        if (p->sched_class->task_waking)
+        if (p->sched_class->task_waking) {
                 p->sched_class->task_waking(rq, p);
+                en_flags |= ENQUEUE_WAKING;
+        }
 
         cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
         if (cpu != orig_cpu)
@@ -2432,7 +2434,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
                 schedstat_inc(p, se.statistics.nr_wakeups_local);
         else
                 schedstat_inc(p, se.statistics.nr_wakeups_remote);
-        activate_task(rq, p, 1);
+        activate_task(rq, p, en_flags);
         success = 1;
 
 out_running:
@@ -3623,7 +3625,7 @@ asmlinkage void __sched schedule(void)
                 if (unlikely(signal_pending_state(prev->state, prev)))
                         prev->state = TASK_RUNNING;
                 else
-                        deactivate_task(rq, prev, 1);
+                        deactivate_task(rq, prev, DEQUEUE_SLEEP);
                 switch_count = &prev->nvcsw;
         }
 
@@ -4193,7 +4195,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
         if (running)
                 p->sched_class->set_curr_task(rq);
         if (on_rq) {
-                enqueue_task(rq, p, 0, oldprio < prio);
+                enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
 
                 check_class_changed(rq, p, prev_class, oldprio, running);
         }
@@ -4236,7 +4238,7 @@ void set_user_nice(struct task_struct *p, long nice)
         delta = p->prio - old_prio;
 
         if (on_rq) {
-                enqueue_task(rq, p, 0, false);
+                enqueue_task(rq, p, 0);
                 /*
                  * If the task increased its priority or is running and
                  * lowered its priority, then reschedule its CPU:
@@ -8180,7 +8182,7 @@ void sched_move_task(struct task_struct *tsk)
         if (unlikely(running))
                 tsk->sched_class->set_curr_task(rq);
         if (on_rq)
-                enqueue_task(rq, tsk, 0, false);
+                enqueue_task(rq, tsk, 0);
 
         task_rq_unlock(rq, &flags);
 }
...
kernel/sched_fair.c
@@ -757,9 +757,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
         se->vruntime = vruntime;
 }
 
-#define ENQUEUE_WAKEUP  1
-#define ENQUEUE_MIGRATE 2
-
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
@@ -767,7 +764,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
          * Update the normalized vruntime before updating min_vruntime
          * through callig update_curr().
          */
-        if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE))
+        if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
                 se->vruntime += cfs_rq->min_vruntime;
 
         /*
@@ -803,7 +800,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 static void
-dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
+dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
         /*
          * Update run-time statistics of the 'current'.
@@ -811,7 +808,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
         update_curr(cfs_rq);
 
         update_stats_dequeue(cfs_rq, se);
-        if (sleep) {
+        if (flags & DEQUEUE_SLEEP) {
 #ifdef CONFIG_SCHEDSTATS
                 if (entity_is_task(se)) {
                         struct task_struct *tsk = task_of(se);
@@ -836,7 +833,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
          * update can refer to the ->curr item and we need to reflect this
          * movement in our normalized position.
          */
-        if (!sleep)
+        if (!(flags & DEQUEUE_SLEEP))
                 se->vruntime -= cfs_rq->min_vruntime;
 }
 
@@ -1045,16 +1042,10 @@ static inline void hrtick_update(struct rq *rq)
  * then put the task into the rbtree:
  */
 static void
-enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
         struct cfs_rq *cfs_rq;
         struct sched_entity *se = &p->se;
-        int flags = 0;
-
-        if (wakeup)
-                flags |= ENQUEUE_WAKEUP;
-        if (p->state == TASK_WAKING)
-                flags |= ENQUEUE_MIGRATE;
 
         for_each_sched_entity(se) {
                 if (se->on_rq)
@@ -1072,18 +1063,18 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
  * decreased. We remove the task from the rbtree and
  * update the fair scheduling stats:
  */
-static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
+static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
         struct cfs_rq *cfs_rq;
         struct sched_entity *se = &p->se;
 
         for_each_sched_entity(se) {
                 cfs_rq = cfs_rq_of(se);
-                dequeue_entity(cfs_rq, se, sleep);
+                dequeue_entity(cfs_rq, se, flags);
                 /* Don't dequeue parent if it has other entities besides us */
                 if (cfs_rq->load.weight)
                         break;
-                sleep = 1;
+                flags |= DEQUEUE_SLEEP;
         }
 
         hrtick_update(rq);
...
kernel/sched_idletask.c
@@ -33,7 +33,7 @@ static struct task_struct *pick_next_task_idle(struct rq *rq)
  * message if some code attempts to do it:
  */
 static void
-dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
+dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
 {
         raw_spin_unlock_irq(&rq->lock);
         printk(KERN_ERR "bad: scheduling from the idle thread!\n");
...
kernel/sched_rt.c
@@ -888,20 +888,20 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
  * Adding/removing a task to/from a priority array:
  */
 static void
-enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
         struct sched_rt_entity *rt_se = &p->rt;
 
-        if (wakeup)
+        if (flags & ENQUEUE_WAKEUP)
                 rt_se->timeout = 0;
 
-        enqueue_rt_entity(rt_se, head);
+        enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
 
         if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
                 enqueue_pushable_task(rq, p);
 }
 
-static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
+static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
         struct sched_rt_entity *rt_se = &p->rt;
...