Commit a7be37ac authored by Peter Zijlstra, committed by Ingo Molnar

sched: revert the revert of: weight calculations

Try again..

initial commit: 8f1bc385
revert: f9305d4a

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent bf647b62
...@@ -1342,6 +1342,9 @@ static void __resched_task(struct task_struct *p, int tif_bit) ...@@ -1342,6 +1342,9 @@ static void __resched_task(struct task_struct *p, int tif_bit)
*/ */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
/*
* delta *= weight / lw
*/
static unsigned long static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight, calc_delta_mine(unsigned long delta_exec, unsigned long weight,
struct load_weight *lw) struct load_weight *lw)
...@@ -1369,12 +1372,6 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight, ...@@ -1369,12 +1372,6 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
} }
static inline unsigned long
calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
{
return calc_delta_mine(delta_exec, NICE_0_LOAD, lw);
}
static inline void update_load_add(struct load_weight *lw, unsigned long inc) static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{ {
lw->weight += inc; lw->weight += inc;
......
...@@ -333,6 +333,34 @@ int sched_nr_latency_handler(struct ctl_table *table, int write, ...@@ -333,6 +333,34 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
} }
#endif #endif
/*
 * delta *= w / rw
 *
 * Scale @delta down by this entity's weight relative to its runqueue's
 * total load.  The for_each_sched_entity() walk applies the scaling at
 * every level of the group-scheduling hierarchy; without
 * CONFIG_FAIR_GROUP_SCHED the loop body runs exactly once.
 * The actual fixed-point multiply/divide is done by calc_delta_mine().
 */
static inline unsigned long
calc_delta_weight(unsigned long delta, struct sched_entity *se)
{
	for_each_sched_entity(se) {
		delta = calc_delta_mine(delta,
				se->load.weight, &cfs_rq_of(se)->load);
	}

	return delta;
}
/*
 * delta *= rw / w
 *
 * The inverse of calc_delta_weight(): scale @delta up by the runqueue
 * weight over the entity's own weight, again walking the whole
 * group-scheduling hierarchy via for_each_sched_entity().  Used to turn
 * a wall-clock delta into virtual time; an entity lighter than its
 * runqueue accumulates vruntime faster than wall-clock.
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	for_each_sched_entity(se) {
		delta = calc_delta_mine(delta,
				cfs_rq_of(se)->load.weight, &se->load);
	}

	return delta;
}
/* /*
* The idea is to set a period in which each task runs once. * The idea is to set a period in which each task runs once.
* *
...@@ -362,47 +390,54 @@ static u64 __sched_period(unsigned long nr_running) ...@@ -362,47 +390,54 @@ static u64 __sched_period(unsigned long nr_running)
*/ */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{ {
u64 slice = __sched_period(cfs_rq->nr_running); return calc_delta_weight(__sched_period(cfs_rq->nr_running), se);
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
slice *= se->load.weight;
do_div(slice, cfs_rq->load.weight);
}
return slice;
} }
/* /*
* We calculate the vruntime slice of a to be inserted task * We calculate the vruntime slice of a to be inserted task
* *
* vs = s/w = p/rw * vs = s*rw/w = p
*/ */
static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se) static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
{ {
unsigned long nr_running = cfs_rq->nr_running; unsigned long nr_running = cfs_rq->nr_running;
unsigned long weight;
u64 vslice;
if (!se->on_rq) if (!se->on_rq)
nr_running++; nr_running++;
vslice = __sched_period(nr_running); return __sched_period(nr_running);
}
/*
* The goal of calc_delta_asym() is to be asymmetrically around NICE_0_LOAD, in
* that it favours >=0 over <0.
*
* -20 |
* |
* 0 --------+-------
* .'
* 19 .'
*
*/
static unsigned long
calc_delta_asym(unsigned long delta, struct sched_entity *se)
{
struct load_weight lw = {
.weight = NICE_0_LOAD,
.inv_weight = 1UL << (WMULT_SHIFT-NICE_0_SHIFT)
};
for_each_sched_entity(se) { for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se); struct load_weight *se_lw = &se->load;
weight = cfs_rq->load.weight; if (se->load.weight < NICE_0_LOAD)
if (!se->on_rq) se_lw = &lw;
weight += se->load.weight;
vslice *= NICE_0_LOAD; delta = calc_delta_mine(delta,
do_div(vslice, weight); cfs_rq_of(se)->load.weight, se_lw);
} }
return vslice; return delta;
} }
/* /*
...@@ -419,11 +454,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, ...@@ -419,11 +454,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
curr->sum_exec_runtime += delta_exec; curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq, exec_clock, delta_exec); schedstat_add(cfs_rq, exec_clock, delta_exec);
delta_exec_weighted = delta_exec; delta_exec_weighted = calc_delta_fair(delta_exec, curr);
if (unlikely(curr->load.weight != NICE_0_LOAD)) {
delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
&curr->load);
}
curr->vruntime += delta_exec_weighted; curr->vruntime += delta_exec_weighted;
} }
...@@ -609,8 +640,17 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) ...@@ -609,8 +640,17 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
if (!initial) { if (!initial) {
/* sleeps upto a single latency don't count. */ /* sleeps upto a single latency don't count. */
if (sched_feat(NEW_FAIR_SLEEPERS)) if (sched_feat(NEW_FAIR_SLEEPERS)) {
vruntime -= sysctl_sched_latency; unsigned long thresh = sysctl_sched_latency;
/*
* convert the sleeper threshold into virtual time
*/
if (sched_feat(NORMALIZED_SLEEPER))
thresh = calc_delta_fair(thresh, se);
vruntime -= thresh;
}
/* ensure we never gain time by being placed backwards. */ /* ensure we never gain time by being placed backwards. */
vruntime = max_vruntime(se->vruntime, vruntime); vruntime = max_vruntime(se->vruntime, vruntime);
...@@ -1111,11 +1151,10 @@ static unsigned long wakeup_gran(struct sched_entity *se) ...@@ -1111,11 +1151,10 @@ static unsigned long wakeup_gran(struct sched_entity *se)
unsigned long gran = sysctl_sched_wakeup_granularity; unsigned long gran = sysctl_sched_wakeup_granularity;
/* /*
* More easily preempt - nice tasks, while not making * More easily preempt - nice tasks, while not making it harder for
* it harder for + nice tasks. * + nice tasks.
*/ */
if (unlikely(se->load.weight > NICE_0_LOAD)) gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se);
gran = calc_delta_fair(gran, &se->load);
return gran; return gran;
} }
......
SCHED_FEAT(NEW_FAIR_SLEEPERS, 1) SCHED_FEAT(NEW_FAIR_SLEEPERS, 1)
SCHED_FEAT(NORMALIZED_SLEEPER, 1)
SCHED_FEAT(WAKEUP_PREEMPT, 1) SCHED_FEAT(WAKEUP_PREEMPT, 1)
SCHED_FEAT(START_DEBIT, 1) SCHED_FEAT(START_DEBIT, 1)
SCHED_FEAT(AFFINE_WAKEUPS, 1) SCHED_FEAT(AFFINE_WAKEUPS, 1)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment