Commit aeb73b04 authored by Peter Zijlstra, committed by Ingo Molnar

sched: clean up new task placement

clean up new task placement.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Mike Galbraith <efault@gmx.de>
parent 2e09bf55
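In substance, the commit replaces the open-coded vruntime setup in enqueue_entity() and task_new_fair() with a single place_entity() helper: start from cfs_rq->min_vruntime, average in the rightmost queued entity's vruntime if one exists, push newly forked tasks to the right when START_DEBIT is enabled, and pull waking sleepers to the left when NEW_FAIR_SLEEPERS is enabled. The standalone sketch below mirrors that arithmetic; it is a reader's illustration, not kernel code, and the types, the fixed latency value, and the compile-time feature flags are stand-ins for the kernel's sched_feat() and sysctl_sched_latency.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Stand-in feature flags; in the kernel these come from sched_feat(). */
#define START_DEBIT        1
#define NEW_FAIR_SLEEPERS  1

/* Stand-in for sysctl_sched_latency: 20 ms in ns, an illustrative value. */
static const u64 sched_latency = 20000000ULL;

/*
 * Mirror of the placement arithmetic introduced by this commit:
 * start from min_vruntime, average in the last (rightmost) entity's
 * vruntime if one exists, debit fresh tasks, credit sleepers.
 */
static u64 place(u64 min_vruntime, const u64 *last_vruntime,
                 u64 se_vruntime, int initial)
{
	u64 min_runtime = min_vruntime;

	if (last_vruntime) {
		min_runtime += *last_vruntime;
		min_runtime >>= 1;

		if (initial && START_DEBIT)
			min_runtime += sched_latency / 2;
	}

	if (!initial && NEW_FAIR_SLEEPERS) {
		if (min_runtime > sched_latency)
			min_runtime -= sched_latency;
		else
			min_runtime = 0;
	}

	/* Never move an entity's vruntime backwards. */
	return se_vruntime > min_runtime ? se_vruntime : min_runtime;
}

int main(void)
{
	u64 last = 50000000ULL;

	/* A newly forked task is pushed to the right (debited). */
	printf("new task: %llu\n",
	       (unsigned long long)place(40000000ULL, &last, 0, 1));

	/* A waking sleeper is pulled to the left (credited). */
	printf("wakeup:   %llu\n",
	       (unsigned long long)place(40000000ULL, &last, 0, 0));
	return 0;
}

With the sample values, the forked task lands half a latency period to the right of the averaged vruntime, while the waking sleeper lands up to one full latency period to the left of it.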
@@ -199,6 +199,21 @@ static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
 	return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
 }
 
+static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
+{
+	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
+	struct sched_entity *se = NULL;
+	struct rb_node *parent;
+
+	while (*link) {
+		parent = *link;
+		se = rb_entry(parent, struct sched_entity, run_node);
+		link = &parent->rb_right;
+	}
+
+	return se;
+}
+
 /**************************************************************
  * Scheduling class statistics methods:
  */
@@ -530,27 +545,40 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 static void
-enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
+place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
-	/*
-	 * Update the fair clock.
-	 */
-	update_curr(cfs_rq);
+	struct sched_entity *last = __pick_last_entity(cfs_rq);
+	u64 min_runtime, latency;
 
-	if (wakeup) {
-		u64 min_runtime, latency;
+	min_runtime = cfs_rq->min_vruntime;
+	if (last) {
+		min_runtime += last->vruntime;
+		min_runtime >>= 1;
 
-		min_runtime = cfs_rq->min_vruntime;
-		min_runtime += sysctl_sched_latency/2;
+		if (initial && sched_feat(START_DEBIT))
+			min_runtime += sysctl_sched_latency/2;
+	}
 
-		if (sched_feat(NEW_FAIR_SLEEPERS)) {
-			latency = calc_weighted(sysctl_sched_latency, se);
-			if (min_runtime > latency)
-				min_runtime -= latency;
-			else
-				min_runtime = 0;
-		}
+	if (!initial && sched_feat(NEW_FAIR_SLEEPERS)) {
+		latency = sysctl_sched_latency;
+		if (min_runtime > latency)
+			min_runtime -= latency;
+		else
+			min_runtime = 0;
+	}
 
-		se->vruntime = max(se->vruntime, min_runtime);
+	se->vruntime = max(se->vruntime, min_runtime);
+}
 
+static void
+enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
+{
+	/*
+	 * Update the fair clock.
+	 */
+	update_curr(cfs_rq);
+
+	if (wakeup) {
+		place_entity(cfs_rq, se, 0);
 		enqueue_sleeper(cfs_rq, se);
 	}
 
@@ -1033,8 +1061,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	sched_info_queued(p);
 
 	update_curr(cfs_rq);
-	se->vruntime = cfs_rq->min_vruntime;
-	update_stats_enqueue(cfs_rq, se);
+	place_entity(cfs_rq, se, 1);
 
 	/*
 	 * The first wait is dominated by the child-runs-first logic,
...