Commit 3dc167ba authored by Oleg Nesterov, committed by Peter Zijlstra

sched/cputime: Improve cputime_adjust()

People report that utime and stime from /proc/<pid>/stat become very
wrong when the numbers are big enough, especially if you watch these
counters incrementally.

Specifically, the current implementation of stime*rtime/total results in
a saw-tooth function on top of the desired line, where the teeth grow in
size the larger the values become. IOW, it has a relative error.

The result is that, when watching incrementally as time progresses
(for large values), we'll see periods of pure stime or utime increase,
irrespective of the actual ratio we're striving for.

Replace scale_stime() with a math64.h helper: mul_u64_u64_div_u64()
that is far more accurate. This also allows architectures to override
the implementation -- for instance they can opt for the old algorithm
if this new one turns out to be too expensive for them.
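The override hook is the usual same-named-macro guard visible in the hunks below: the x86 header defines mul_u64_u64_div_u64 as a macro, and the generic fallback in lib/math/div64.c is only compiled when that macro is absent. A compressed user-space sketch of the mechanism (illustration only; both bodies are placeholders and ARCH_HAS_FAST_VERSION is a made-up switch, not a kernel symbol):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

#ifdef ARCH_HAS_FAST_VERSION            /* stands in for the arch header */
static inline u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div)
{
        /* placeholder body; the real x86 version is the mulq/divq asm below */
        return (u64)(((unsigned __int128)a * mul) / div);
}
#define mul_u64_u64_div_u64 mul_u64_u64_div_u64
#endif

#ifndef mul_u64_u64_div_u64             /* stands in for lib/math/div64.c */
static u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
{
        /* placeholder body; the real fallback is the one added by this patch */
        return (u64)(((unsigned __int128)a * b) / c);
}
#endif

int main(void)
{
        /* 2^40 * 2^40 / 2^20 = 2^60; the product needs a 128-bit intermediate */
        printf("%llu\n", (unsigned long long)
               mul_u64_u64_div_u64(1ULL << 40, 1ULL << 40, 1ULL << 20));
        return 0;
}

Compiling with -DARCH_HAS_FAST_VERSION selects the "arch" definition; without it, the #ifndef block supplies the fallback, which is exactly how the two hunks below cooperate.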
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200519172506.GA317395@hirez.programming.kicks-ass.net
parent b3a9e3b9
arch/x86/include/asm/div64.h
@@ -74,16 +74,26 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
 #else
 # include <asm-generic/div64.h>
 
-static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 div)
+/*
+ * Will generate an #DE when the result doesn't fit u64, could fix with an
+ * __ex_table[] entry when it becomes an issue.
+ */
+static inline u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div)
 {
 	u64 q;
 
 	asm ("mulq %2; divq %3" : "=a" (q)
-				: "a" (a), "rm" ((u64)mul), "rm" ((u64)div)
+				: "a" (a), "rm" (mul), "rm" (div)
 				: "rdx");
 
 	return q;
 }
+#define mul_u64_u64_div_u64 mul_u64_u64_div_u64
+
+static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 div)
+{
+	return mul_u64_u64_div_u64(a, mul, div);
+}
 #define mul_u64_u32_div mul_u64_u32_div
 
 #endif /* CONFIG_X86_32 */
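For reference, the inline asm above multiplies a by mul into the full 128-bit RDX:RAX pair and then divides that by div; if the quotient does not fit in 64 bits the CPU raises #DE, as the new comment notes. A rough user-space equivalent (an illustration, not kernel code; mul_u64_u64_div_u64_ref is a name made up here, and it relies on GCC/Clang's unsigned __int128):

#include <stdio.h>
#include <stdint.h>

static uint64_t mul_u64_u64_div_u64_ref(uint64_t a, uint64_t mul, uint64_t div)
{
        unsigned __int128 prod = (unsigned __int128)a * mul; /* full 128-bit product */
        return (uint64_t)(prod / div);                       /* then the 128-by-64 divide */
}

int main(void)
{
        /* UINT64_MAX * 3 / 5: the product overflows 64 bits, the quotient fits */
        printf("%llu\n", (unsigned long long)
               mul_u64_u64_div_u64_ref(UINT64_MAX, 3, 5));
        return 0;
}

The difference is the overflow case: the asm traps with #DE, while this sketch would silently truncate a too-large quotient to 64 bits.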
include/linux/math64.h
@@ -263,6 +263,8 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
 }
 #endif /* mul_u64_u32_div */
 
+u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
+
 #define DIV64_U64_ROUND_UP(ll, d)	\
 	({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
kernel/sched/cputime.c
@@ -519,50 +519,6 @@ void account_idle_ticks(unsigned long ticks)
 	account_idle_time(cputime);
 }
 
-/*
- * Perform (stime * rtime) / total, but avoid multiplication overflow by
- * losing precision when the numbers are big.
- */
-static u64 scale_stime(u64 stime, u64 rtime, u64 total)
-{
-	u64 scaled;
-
-	for (;;) {
-		/* Make sure "rtime" is the bigger of stime/rtime */
-		if (stime > rtime)
-			swap(rtime, stime);
-
-		/* Make sure 'total' fits in 32 bits */
-		if (total >> 32)
-			goto drop_precision;
-
-		/* Does rtime (and thus stime) fit in 32 bits? */
-		if (!(rtime >> 32))
-			break;
-
-		/* Can we just balance rtime/stime rather than dropping bits? */
-		if (stime >> 31)
-			goto drop_precision;
-
-		/* We can grow stime and shrink rtime and try to make them both fit */
-		stime <<= 1;
-		rtime >>= 1;
-		continue;
-
-drop_precision:
-		/* We drop from rtime, it has more bits than stime */
-		rtime >>= 1;
-		total >>= 1;
-	}
-
-	/*
-	 * Make sure gcc understands that this is a 32x32->64 multiply,
-	 * followed by a 64/32->64 divide.
-	 */
-	scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
-	return scaled;
-}
-
 /*
  * Adjust tick based cputime random precision against scheduler runtime
  * accounting.
@@ -622,7 +578,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
 		goto update;
 	}
 
-	stime = scale_stime(stime, rtime, stime + utime);
+	stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);
 
 update:
 	/*
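To replay the problem the changelog describes, the removed scale_stime() can be compared against an exact 128-bit reference in user space. This is only a sketch (not part of the commit); swap() and div_u64() are small stand-ins for the kernel helpers, and the 50/50 stime/utime split, the base value and the step size are arbitrary:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

#define swap(a, b) do { u64 t = (a); (a) = (b); (b) = t; } while (0)
static u64 div_u64(u64 dividend, u32 divisor) { return dividend / divisor; }

/* condensed copy (comments and a local trimmed) of the helper removed above */
static u64 scale_stime(u64 stime, u64 rtime, u64 total)
{
        for (;;) {
                if (stime > rtime)
                        swap(rtime, stime);
                if (total >> 32)
                        goto drop_precision;
                if (!(rtime >> 32))
                        break;
                if (stime >> 31)
                        goto drop_precision;
                stime <<= 1;
                rtime >>= 1;
                continue;
drop_precision:
                rtime >>= 1;
                total >>= 1;
        }
        return div_u64((u64)(u32)stime * (u64)(u32)rtime, (u32)total);
}

int main(void)
{
        u64 base = 3ULL << 42;          /* a large accumulated runtime */

        for (int i = 0; i < 8; i++) {
                /* pretend the tick counters show a fixed 50/50 stime/utime split */
                u64 rtime = base + (u64)i * 1000;
                u64 stime = rtime / 2;
                u64 total = rtime;      /* i.e. stime + utime */
                u64 old = scale_stime(stime, rtime, total);
                u64 exact = (u64)(((unsigned __int128)stime * rtime) / total);

                printf("rtime=%llu  old=%llu  exact=%llu\n",
                       (unsigned long long)rtime,
                       (unsigned long long)old,
                       (unsigned long long)exact);
        }
        return 0;
}

With counters this large, the scale_stime() column moves in coarse steps (flat for several samples, then a jump) while the exact column tracks rtime/2 smoothly; watched incrementally, those flat stretches are the periods where all apparent growth lands in the other counter.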
lib/math/div64.c
@@ -190,3 +190,44 @@ u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
 	return __iter_div_u64_rem(dividend, divisor, remainder);
 }
 EXPORT_SYMBOL(iter_div_u64_rem);
+
+#ifndef mul_u64_u64_div_u64
+u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
+{
+	u64 res = 0, div, rem;
+	int shift;
+
+	/* can a * b overflow ? */
+	if (ilog2(a) + ilog2(b) > 62) {
+		/*
+		 * (b * a) / c is equal to
+		 *
+		 *      (b / c) * a +
+		 *      (b % c) * a / c
+		 *
+		 * if nothing overflows. Can the 1st multiplication
+		 * overflow? Yes, but we do not care: this can only
+		 * happen if the end result can't fit in u64 anyway.
+		 *
+		 * So the code below does
+		 *
+		 *      res = (b / c) * a;
+		 *      b = b % c;
+		 */
+		div = div64_u64_rem(b, c, &rem);
+		res = div * a;
+		b = rem;
+
+		shift = ilog2(a) + ilog2(b) - 62;
+		if (shift > 0) {
+			/* drop precision */
+			b >>= shift;
+			c >>= shift;
+			if (!c)
+				return res;
+		}
+	}
+
+	return res + div64_u64(a * b, c);
+}
+#endif
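As a quick sanity check, the generic fallback can be exercised in user space against a 128-bit reference. The snippet below is an illustration only: ilog2(), div64_u64() and div64_u64_rem() are kernel helpers, so simplified stand-ins are provided, and the cputime-sized operands are arbitrary. The two results agree except possibly in the lowest digits when the precision-dropping branch is taken (as it is here):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

/* simplified user-space stand-ins for the kernel helpers used above */
static int ilog2(u64 v) { return v ? 63 - __builtin_clzll(v) : 0; }
static u64 div64_u64(u64 a, u64 b) { return a / b; }
static u64 div64_u64_rem(u64 a, u64 b, u64 *rem) { *rem = a % b; return a / b; }

/* the generic fallback added above, with its long comment trimmed */
static u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
{
        u64 res = 0, div, rem;
        int shift;

        if (ilog2(a) + ilog2(b) > 62) {
                div = div64_u64_rem(b, c, &rem);
                res = div * a;
                b = rem;

                shift = ilog2(a) + ilog2(b) - 62;
                if (shift > 0) {
                        b >>= shift;
                        c >>= shift;
                        if (!c)
                                return res;
                }
        }
        return res + div64_u64(a * b, c);
}

int main(void)
{
        /* arbitrary cputime-sized operands: stime * rtime / (stime + utime) */
        u64 stime = 30000000000000ULL; /* ~8.3 hours in ns */
        u64 utime = 30000000000000ULL;
        u64 rtime = 70000000000000ULL;

        u64 got  = mul_u64_u64_div_u64(stime, rtime, stime + utime);
        u64 want = (u64)(((unsigned __int128)stime * rtime) / (stime + utime));

        printf("generic=%llu  reference=%llu\n",
               (unsigned long long)got, (unsigned long long)want);
        return 0;
}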