Commit 11697830 authored by Peter Zijlstra, committed by Ingo Molnar

sched: fix ideal_runtime calculations for reniced tasks

fix ideal_runtime:

  - do not scale it using niced_granularity();
    it is compared against sum_exec_delta, so it is wall-time, not fair-time.

  - move the whole check into __check_preempt_curr_fair()
    so that wakeup preemption can also benefit from the new logic.

this also results in a code size reduction:

   text    data     bss     dec     hex filename
  13391     228    1204   14823    39e7 sched.o.before
  13369     228    1204   14801    39d1 sched.o.after
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 4a55b450
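
For illustration, a minimal userspace sketch of the new check follows. This is not the kernel code: the helper name (effective_granularity), the sysctl example values, and the simplified max() macro are all assumptions made for this sketch; the real logic operates on a struct sched_entity inside __check_preempt_curr_fair().

#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

/* Assumed example values, in nanoseconds; the real sysctls live in the
 * scheduler and may differ. */
static unsigned long sysctl_sched_latency         = 20000000UL; /* 20 ms */
static unsigned long sysctl_sched_min_granularity =  2000000UL; /*  2 ms */

/*
 * Hypothetical helper mirroring the added logic: compute the wall-time
 * slice a task may run before it should be preempted, and zero the
 * preemption granularity once that slice is exceeded.
 */
static unsigned long
effective_granularity(unsigned long nr_running,
                      unsigned long sum_exec_runtime,
                      unsigned long prev_sum_exec_runtime,
                      unsigned long granularity)
{
        /* Wall-time slice: the latency target split across runnable
         * tasks, clamped from below; deliberately NOT scaled by nice
         * level, since it is compared against wall-time below. */
        unsigned long ideal_runtime =
                max(sysctl_sched_latency / nr_running,
                    sysctl_sched_min_granularity);

        /* Wall-time the current task has run since being scheduled. */
        unsigned long delta_exec = sum_exec_runtime - prev_sum_exec_runtime;

        /* Past the ideal slice: drop the granularity so any fairness
         * deficit triggers an immediate reschedule. */
        if (delta_exec > ideal_runtime)
                granularity = 0;

        return granularity;
}

int main(void)
{
        /* 4 runnable tasks: slice = max(20ms/4, 2ms) = 5 ms.
         * A task that already ran 6 ms gets granularity 0, i.e. it is
         * preempted as soon as any task has a smaller fair_key. */
        printf("%lu\n", effective_granularity(4, 6000000UL, 0UL, 2000000UL));
        return 0;
}
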
@@ -678,11 +678,31 @@ __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
 		struct sched_entity *curr, unsigned long granularity)
 {
 	s64 __delta = curr->fair_key - se->fair_key;
+	unsigned long ideal_runtime, delta_exec;
+
+	/*
+	 * ideal_runtime is compared against sum_exec_runtime, which is
+	 * walltime, hence do not scale.
+	 */
+	ideal_runtime = max(sysctl_sched_latency / cfs_rq->nr_running,
+			(unsigned long)sysctl_sched_min_granularity);
+
+	/*
+	 * If we executed more than what the latency constraint suggests,
+	 * reduce the rescheduling granularity. This way the total latency
+	 * of how much a task is not scheduled converges to
+	 * sysctl_sched_latency:
+	 */
+	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+	if (delta_exec > ideal_runtime)
+		granularity = 0;
 
 	/*
 	 * Take scheduling granularity into account - do not
 	 * preempt the current task unless the best task has
 	 * a larger than sched_granularity fairness advantage:
+	 *
+	 * scale granularity as key space is in fair_clock.
 	 */
 	if (__delta > niced_granularity(curr, granularity))
 		resched_task(rq_of(cfs_rq)->curr);
@@ -731,7 +751,6 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
-	unsigned long gran, ideal_runtime, delta_exec;
 	struct sched_entity *next;
 
 	/*
@@ -748,21 +767,8 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 	if (next == curr)
 		return;
 
-	gran = sched_granularity(cfs_rq);
-	ideal_runtime = niced_granularity(curr,
-			max(sysctl_sched_latency / cfs_rq->nr_running,
-			    (unsigned long)sysctl_sched_min_granularity));
-	/*
-	 * If we executed more than what the latency constraint suggests,
-	 * reduce the rescheduling granularity. This way the total latency
-	 * of how much a task is not scheduled converges to
-	 * sysctl_sched_latency:
-	 */
-	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	if (delta_exec > ideal_runtime)
-		gran = 0;
-
-	__check_preempt_curr_fair(cfs_rq, next, curr, gran);
+	__check_preempt_curr_fair(cfs_rq, next, curr,
+				  sched_granularity(cfs_rq));
 }
 
 /**************************************************
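
To make the wall-time math concrete (using, purely as example values, a 20 ms latency target and a 2 ms minimum granularity): with 8 runnable tasks, ideal_runtime = max(20 ms / 8, 2 ms) = 2.5 ms. Once curr has accumulated more than 2.5 ms of sum_exec_runtime since it was last scheduled, the granularity passed to niced_granularity() becomes 0, so any task with a smaller fair_key preempts it immediately; before that point, preemption still requires a fairness advantage larger than the nice-scaled granularity.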