Commit 194081eb authored by Ingo Molnar

sched: round a bit better

round a tiny bit better in high-frequency rescheduling scenarios,
by rounding around zero instead of rounding down.

(this is pretty theoretical though)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 254753dc
@@ -638,6 +638,11 @@ static u64 div64_likely32(u64 divident, unsigned long divisor)
 
 #define WMULT_SHIFT	32
 
+/*
+ * Shift right and round:
+ */
+#define RSR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
+
 static unsigned long
 calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 		struct load_weight *lw)
@@ -645,18 +650,17 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 	u64 tmp;
 
 	if (unlikely(!lw->inv_weight))
-		lw->inv_weight = WMULT_CONST / lw->weight;
+		lw->inv_weight = (WMULT_CONST - lw->weight/2) / lw->weight + 1;
 
 	tmp = (u64)delta_exec * weight;
 	/*
 	 * Check whether we'd overflow the 64-bit multiplication:
 	 */
-	if (unlikely(tmp > WMULT_CONST)) {
-		tmp = ((tmp >> WMULT_SHIFT/2) * lw->inv_weight)
-				>> (WMULT_SHIFT/2);
-	} else {
-		tmp = (tmp * lw->inv_weight) >> WMULT_SHIFT;
-	}
+	if (unlikely(tmp > WMULT_CONST))
+		tmp = RSR(RSR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
+			WMULT_SHIFT/2);
+	else
+		tmp = RSR(tmp * lw->inv_weight, WMULT_SHIFT);
 
 	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
 }
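Illustration (not part of the commit): the standalone user-space sketch below shows the effect of the change, assuming for demonstration that WMULT_CONST is 1ULL << 32 (its actual definition is not visible in this hunk) and using a made-up weight of 3 and a delta of 1000. The old truncating inverse weight and plain shift lose one unit; the rounded inverse weight combined with RSR() recovers the exact value.

#include <stdio.h>
#include <stdint.h>

#define WMULT_SHIFT	32
#define WMULT_CONST	(1ULL << WMULT_SHIFT)	/* assumed value for this sketch only */

/* Shift right and round, as introduced by this commit. */
#define RSR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))

int main(void)
{
	unsigned long weight = 3;		/* hypothetical lw->weight */
	unsigned long delta_exec = 1000;	/* hypothetical runtime delta */

	/* Old inverse weight: truncating division, always a bit too small. */
	unsigned long inv_old = WMULT_CONST / weight;
	/* New inverse weight: rounded so the later multiply-and-shift lands
	 * around the true quotient instead of consistently below it. */
	unsigned long inv_new = (WMULT_CONST - weight/2) / weight + 1;

	/* With weight equal to lw->weight the result should equal delta_exec. */
	uint64_t tmp = (uint64_t)delta_exec * weight;

	unsigned long old_result = (unsigned long)((tmp * inv_old) >> WMULT_SHIFT);
	unsigned long new_result = (unsigned long)RSR(tmp * inv_new, WMULT_SHIFT);

	printf("old: %lu, new: %lu, exact: %lu\n", old_result, new_result, delta_exec);
	return 0;
}

On this toy input the old path prints 999 while the new path prints 1000, which is the small systematic downward bias the rounded inverse weight and RSR() are meant to remove.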