Commit 67e9fb2a authored by Peter Zijlstra, committed by Ingo Molnar

sched: add vslice

add vslice: the load-dependent "virtual slice" a task should
ideally run for, so that the observed latency stays within the
sched_latency window.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
parent 1aa4731e
@@ -908,6 +908,7 @@ struct sched_entity {
 	u64		sum_exec_runtime;
 	u64		vruntime;
 	u64		prev_sum_exec_runtime;
+	u64		last_min_vruntime;

 #ifdef CONFIG_SCHEDSTATS
 	u64		wait_start;
......
@@ -1615,6 +1615,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.exec_start = 0;
 	p->se.sum_exec_runtime = 0;
 	p->se.prev_sum_exec_runtime = 0;
+	p->se.last_min_vruntime = 0;

 #ifdef CONFIG_SCHEDSTATS
 	p->se.wait_start = 0;
@@ -6495,6 +6496,7 @@ static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	cfs_rq->rq = rq;
 #endif
+	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
 }

 void __init sched_init(void)
......
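A note on the init_cfs_rq() hunk above: min_vruntime is seeded (1 << 20) nanoseconds, roughly 1 ms, below the u64 wrap point, presumably so that vruntime arithmetic wraps shortly after boot and any wrap-handling bug surfaces immediately rather than weeks into uptime. This is safe because ordering is taken from signed differences (as in the s64 latency computation further down). A minimal standalone C sketch of that property, illustrative userspace code rather than kernel code:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Seed just below the u64 wrap point, as init_cfs_rq() does. */
	uint64_t a = (uint64_t)(-(1LL << 20));
	/* Advance ~2 ms of "runtime", crossing the wrap. */
	uint64_t b = a + (2ULL << 20);

	/* A raw unsigned compare orders the wrapped values wrongly... */
	printf("a < b (unsigned): %d\n", a < b);                    /* 0 */
	/* ...but the signed difference still places b after a. */
	printf("(int64_t)(b - a) > 0: %d\n", (int64_t)(b - a) > 0); /* 1 */
	return 0;
}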
@@ -243,6 +243,15 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	return period;
 }

+static u64 __sched_vslice(unsigned long nr_running)
+{
+	u64 period = __sched_period(nr_running);
+
+	do_div(period, nr_running);
+
+	return period;
+}
+
 /*
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
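__sched_vslice() splits the load-dependent latency period evenly among the runnable tasks; since vruntime advances at a weight-normalized rate, an even split of the period is exactly one entity's slice expressed in virtual time. The kernel uses do_div() so the u64 division also works on 32-bit architectures. A rough standalone illustration follows; the simplified sched_period() is an assumed stand-in for the real __sched_period(), and the 20 ms / 2 ms constants are illustrative defaults of the era, not values read from this diff:

#include <stdio.h>
#include <stdint.h>

/* Illustrative defaults (assumptions, not taken from this patch). */
static const uint64_t sched_latency_ns   = 20000000ULL; /* 20 ms */
static const uint64_t min_granularity_ns =  2000000ULL; /*  2 ms */

/* Simplified stand-in for __sched_period(): stretch the period once
 * an even split would fall below the minimum granularity. */
static uint64_t sched_period(unsigned long nr_running)
{
	uint64_t period = sched_latency_ns;

	if (nr_running * min_granularity_ns > period)
		period = nr_running * min_granularity_ns;
	return period;
}

/* Mirror of __sched_vslice(): one task's even share of the period.
 * Plain '/' replaces the kernel's do_div() in userspace. */
static uint64_t sched_vslice(unsigned long nr_running)
{
	return sched_period(nr_running) / nr_running;
}

int main(void)
{
	/*  4 tasks: 20 ms / 4 = 5 ms each per period. */
	printf("vslice(4)  = %llu ns\n", (unsigned long long)sched_vslice(4));
	/* 40 tasks: period stretches to 80 ms, vslice floors at 2 ms. */
	printf("vslice(40) = %llu ns\n", (unsigned long long)sched_vslice(40));
	return 0;
}

This also explains the START_DEBIT change below: a newly started task is debited __sched_vslice(cfs_rq->nr_running + 1), i.e. one virtual slice computed as if the newcomer were already counted on the queue.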
@@ -441,32 +450,33 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
-	u64 min_runtime, latency;
+	u64 vruntime;

-	min_runtime = cfs_rq->min_vruntime;
+	vruntime = cfs_rq->min_vruntime;

 	if (sched_feat(USE_TREE_AVG)) {
 		struct sched_entity *last = __pick_last_entity(cfs_rq);
 		if (last) {
-			min_runtime = __pick_next_entity(cfs_rq)->vruntime;
-			min_runtime += last->vruntime;
-			min_runtime >>= 1;
+			vruntime += last->vruntime;
+			vruntime >>= 1;
 		}
-	} else if (sched_feat(APPROX_AVG))
-		min_runtime += sysctl_sched_latency/2;
+	} else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
+		vruntime += __sched_vslice(cfs_rq->nr_running)/2;

 	if (initial && sched_feat(START_DEBIT))
-		min_runtime += sched_slice(cfs_rq, se);
+		vruntime += __sched_vslice(cfs_rq->nr_running + 1);

 	if (!initial && sched_feat(NEW_FAIR_SLEEPERS)) {
-		latency = sysctl_sched_latency;
-		if (min_runtime > latency)
-			min_runtime -= latency;
+		s64 latency = cfs_rq->min_vruntime - se->last_min_vruntime;
+
+		if (latency < 0 || !cfs_rq->nr_running)
+			latency = 0;
 		else
-			min_runtime = 0;
+			latency = min_t(s64, latency, sysctl_sched_latency);
+		vruntime -= latency;
 	}

-	se->vruntime = max(se->vruntime, min_runtime);
+	se->vruntime = vruntime;
 }

 static void
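The rewritten place_entity() bounds the sleeper credit twice: a waking task is placed at most sysctl_sched_latency before min_vruntime, and it never receives more credit than min_vruntime actually advanced while it slept, which is what se->last_min_vruntime (recorded at dequeue, below) provides. A userspace paraphrase of that clamp with simplified types; a sketch of the NEW_FAIR_SLEEPERS branch, not the kernel function itself:

#include <stdio.h>
#include <stdint.h>

typedef int64_t s64;
typedef uint64_t u64;

/* Where to place a waking sleeper's vruntime, per the new branch. */
static u64 place_sleeper(u64 min_vruntime, u64 last_min_vruntime,
			 u64 sched_latency, unsigned long nr_running)
{
	/* How far min_vruntime advanced while the task slept; signed,
	 * so a stale or wrapped value clamps to zero below. */
	s64 latency = (s64)(min_vruntime - last_min_vruntime);

	if (latency < 0 || !nr_running)
		latency = 0;
	else if (latency > (s64)sched_latency)
		latency = (s64)sched_latency;	/* min_t(s64, ...) in the patch */

	return min_vruntime - (u64)latency;
}

int main(void)
{
	/* A long sleep: the credit caps at sched_latency (20 ms here),
	 * so the sleeper lands 20 ms behind min_vruntime, no further. */
	printf("%llu\n", (unsigned long long)
	       place_sleeper(100000000ULL, 10000000ULL, 20000000ULL, 3));
	return 0;
}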
@@ -478,6 +488,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	update_curr(cfs_rq);

 	if (wakeup) {
+		/* se->vruntime += cfs_rq->min_vruntime; */
 		place_entity(cfs_rq, se, 0);
 		enqueue_sleeper(cfs_rq, se);
 	}
@@ -492,8 +503,8 @@ static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
 	update_stats_dequeue(cfs_rq, se);
-#ifdef CONFIG_SCHEDSTATS
 	if (sleep) {
+#ifdef CONFIG_SCHEDSTATS
 		if (entity_is_task(se)) {
 			struct task_struct *tsk = task_of(se);

@@ -502,8 +513,11 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 			if (tsk->state & TASK_UNINTERRUPTIBLE)
 				se->block_start = rq_of(cfs_rq)->clock;
 		}
-	}
 #endif
+		/* se->vruntime = entity_key(cfs_rq, se); */
+		se->last_min_vruntime = cfs_rq->min_vruntime;
+	}
+
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	account_entity_dequeue(cfs_rq, se);
......