Commit a558cd02 authored by John Stultz, committed by Ingo Molnar

timekeeping: Add checks to cap clocksource reads to the 'max_cycles' value

When calculating the current delta since the last tick, we
currently have no hard protections to prevent a multiplication
overflow from occurring.

This patch introduces infrastructure to allow a cap that
limits the clocksource read delta value to the 'max_cycles' value,
which is where an overflow would occur.

Since this is in the hotpath, the extra checking is only added
under CONFIG_DEBUG_TIMEKEEPING=y.

There was some concern that capping time like this could cause
problems, as we may stop expiring timers. That could become
circular if the timer that triggers time accumulation were
mis-scheduled too far in the future, which would cause time to stop.

However, since the mult overflow would result in a smaller time
value, we would effectively have the same problem there.
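
As a rough illustration of what the cap guards against (a minimal user-space
sketch, not the kernel code; the mult/shift values and the way max_cycles is
derived here are assumptions for demonstration only): cycles are converted to
nanoseconds as (delta * mult) >> shift, so once the delta grows past roughly
S64_MAX / mult the 64-bit product wraps and time appears to jump. Clamping
the delta at that boundary keeps the product in range:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* illustrative clocksource values only: mult = 2^22, shift = 22 */
		uint32_t mult = 1 << 22;
		uint32_t shift = 22;

		/* rough overflow boundary: largest delta whose product fits in s64 */
		uint64_t max_cycles = (uint64_t)INT64_MAX / mult;

		/* pretend accumulation ran very late and the delta grew too large */
		uint64_t delta = max_cycles + 12345;

		/* the cap applied by this patch (debug builds only) */
		if (delta > max_cycles)
			delta = max_cycles;

		int64_t nsec = (int64_t)(delta * mult) >> shift;
		printf("capped delta=%llu cycles -> %lld ns\n",
		       (unsigned long long)delta, (long long)nsec);
		return 0;
	}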

Signed-off-by: John Stultz <john.stultz@linaro.org>
Cc: Dave Jones <davej@codemonkey.org.uk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Prarit Bhargava <prarit@redhat.com>
Cc: Richard Cochran <richardcochran@gmail.com>
Cc: Stephen Boyd <sboyd@codeaurora.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1426133800-29329-6-git-send-email-john.stultz@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 3c17ad19
kernel/time/timekeeping.c

@@ -126,9 +126,9 @@ static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
 	const char *name = tk->tkr.clock->name;
 
 	if (offset > max_cycles) {
-		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow\n",
+		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
 				offset, name, max_cycles);
-		printk_deferred("         timekeeping: Your kernel is sick, but tries to cope\n");
+		printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
 	} else {
 		if (offset > (max_cycles >> 1)) {
 			printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the the '%s' clock's 50%% safety margin (%lld)\n",
@@ -137,10 +137,39 @@ static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
 		}
 	}
 }
+
+static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
+{
+	cycle_t cycle_now, delta;
+
+	/* read clocksource */
+	cycle_now = tkr->read(tkr->clock);
+
+	/* calculate the delta since the last update_wall_time */
+	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
+
+	/* Cap delta value to the max_cycles values to avoid mult overflows */
+	if (unlikely(delta > tkr->clock->max_cycles))
+		delta = tkr->clock->max_cycles;
+
+	return delta;
+}
 #else
 static inline void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
 {
 }
+
+static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
+{
+	cycle_t cycle_now, delta;
+
+	/* read clocksource */
+	cycle_now = tkr->read(tkr->clock);
+
+	/* calculate the delta since the last update_wall_time */
+	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
+	return delta;
+}
 #endif
 
 /**
@@ -218,14 +247,10 @@ static inline u32 arch_gettimeoffset(void) { return 0; }
 
 static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
 {
-	cycle_t cycle_now, delta;
+	cycle_t delta;
 	s64 nsec;
 
-	/* read clocksource: */
-	cycle_now = tkr->read(tkr->clock);
-
-	/* calculate the delta since the last update_wall_time: */
-	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
+	delta = timekeeping_get_delta(tkr);
 
 	nsec = delta * tkr->mult + tkr->xtime_nsec;
 	nsec >>= tkr->shift;
@@ -237,14 +262,10 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
 static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
 {
 	struct clocksource *clock = tk->tkr.clock;
-	cycle_t cycle_now, delta;
+	cycle_t delta;
 	s64 nsec;
 
-	/* read clocksource: */
-	cycle_now = tk->tkr.read(clock);
-
-	/* calculate the delta since the last update_wall_time: */
-	delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
+	delta = timekeeping_get_delta(&tk->tkr);
 
 	/* convert delta to nanoseconds. */
 	nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
...