Commit 02a37ccd authored by John Stultz, committed by Greg Kroah-Hartman

time: Fix clock->read(clock) race around clocksource changes

commit ceea5e37 upstream.

In tests that exercise switching of clocksources, a NULL
pointer dereference can be observed on ARM64 platforms in the
clocksource read() function:

u64 clocksource_mmio_readl_down(struct clocksource *c)
{
	return ~(u64)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
}

This is called from the core timekeeping code via:

	cycle_now = tkr->read(tkr->clock);

tkr->read is the cached tkr->clock->read() function pointer.
When the clocksource is changed, tkr->clock and tkr->read are
updated sequentially. The code above likewise loads tkr->read
and tkr->clock as two separate, sequential operations.

If the store to tkr->clock hits between the loads of tkr->read
and tkr->clock, then the old read() function is called with the
new clock pointer. As a consequence, the read() function
dereferences a different data structure and the resulting 'reg'
pointer can point anywhere, including NULL.
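
Roughly sketched, assuming the load of tkr->read is issued before the
load of tkr->clock (read_fn and clk are hypothetical local names used
only for illustration):

	read_fn = tkr->read;       /* loads the old clocksource's read() */
	/* clocksource switch runs here: tkr->clock and tkr->read updated */
	clk = tkr->clock;          /* now loads the new clocksource */
	cycle_now = read_fn(clk);  /* old read() handed the new clock */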

This problem was introduced when the timekeeping code was
switched over to use struct tk_read_base. Before that, the race
was also theoretically possible if the compiler decided to
reload clock in the code sequence:

     now = tk->clock->read(tk->clock);

Add a helper function which avoids the issue by reading
tk_read_base->clock once into a local variable clk and then
invoking the read function via clk->read(clk). This guarantees
that the read() function always gets the proper clocksource
pointer handed in.
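
The helper added in the diff below reads the clock pointer exactly
once via READ_ONCE(), so the read() function and its argument always
come from the same clocksource:

	static inline u64 tk_clock_read(struct tk_read_base *tkr)
	{
		struct clocksource *clock = READ_ONCE(tkr->clock);

		return clock->read(clock);
	}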

Since there is now no use for the tkr.read pointer, this patch
also removes it, and to address stopping the fast timekeeper
during suspend/resume, it introduces a dummy clocksource to use
rather than just a dummy read function.
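
For reference, the dummy clocksource added below simply routes read()
to the existing dummy_clock_read():

	static struct clocksource dummy_clock = {
		.read = dummy_clock_read,
	};
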
Signed-off-by: John Stultz <john.stultz@linaro.org>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Prarit Bhargava <prarit@redhat.com>
Cc: Richard Cochran <richardcochran@gmail.com>
Cc: Stephen Boyd <stephen.boyd@linaro.org>
Cc: Miroslav Lichvar <mlichvar@redhat.com>
Cc: Daniel Mentz <danielmentz@google.com>
Link: http://lkml.kernel.org/r/1496965462-20003-2-git-send-email-john.stultz@linaro.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent c81d034b
@@ -29,7 +29,6 @@
  */
 struct tk_read_base {
 	struct clocksource	*clock;
-	cycle_t			(*read)(struct clocksource *cs);
 	cycle_t			mask;
 	cycle_t			cycle_last;
 	u32			mult;
@@ -116,6 +116,26 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
 	tk->offs_boot = ktime_add(tk->offs_boot, delta);
 }
 
+/*
+ * tk_clock_read - atomic clocksource read() helper
+ *
+ * This helper is necessary to use in the read paths because, while the
+ * seqlock ensures we don't return a bad value while structures are updated,
+ * it doesn't protect from potential crashes. There is the possibility that
+ * the tkr's clocksource may change between the read reference, and the
+ * clock reference passed to the read function. This can cause crashes if
+ * the wrong clocksource is passed to the wrong read function.
+ * This isn't necessary to use when holding the timekeeper_lock or doing
+ * a read of the fast-timekeeper tkrs (which is protected by its own locking
+ * and update logic).
+ */
+static inline u64 tk_clock_read(struct tk_read_base *tkr)
+{
+	struct clocksource *clock = READ_ONCE(tkr->clock);
+
+	return clock->read(clock);
+}
+
 #ifdef CONFIG_DEBUG_TIMEKEEPING
 #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
@@ -173,7 +193,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
 	 */
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
-		now = tkr->read(tkr->clock);
+		now = tk_clock_read(tkr);
 		last = tkr->cycle_last;
 		mask = tkr->mask;
 		max = tkr->clock->max_cycles;
@@ -207,7 +227,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
 	cycle_t cycle_now, delta;
 
 	/* read clocksource */
-	cycle_now = tkr->read(tkr->clock);
+	cycle_now = tk_clock_read(tkr);
 
 	/* calculate the delta since the last update_wall_time */
 	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
@@ -236,12 +256,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 	++tk->cs_was_changed_seq;
 	old_clock = tk->tkr_mono.clock;
 	tk->tkr_mono.clock = clock;
-	tk->tkr_mono.read = clock->read;
 	tk->tkr_mono.mask = clock->mask;
-	tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
+	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
 
 	tk->tkr_raw.clock = clock;
-	tk->tkr_raw.read = clock->read;
 	tk->tkr_raw.mask = clock->mask;
 	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
@@ -405,7 +423,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 		now += timekeeping_delta_to_ns(tkr,
 				clocksource_delta(
-					tkr->read(tkr->clock),
+					tk_clock_read(tkr),
 					tkr->cycle_last,
 					tkr->mask));
 	} while (read_seqcount_retry(&tkf->seq, seq));
@@ -433,6 +451,10 @@ static cycle_t dummy_clock_read(struct clocksource *cs)
 	return cycles_at_suspend;
 }
 
+static struct clocksource dummy_clock = {
+	.read = dummy_clock_read,
+};
+
 /**
  * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
  * @tk: Timekeeper to snapshot.
@@ -449,13 +471,13 @@ static void halt_fast_timekeeper(struct timekeeper *tk)
 	struct tk_read_base *tkr = &tk->tkr_mono;
 
 	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
-	cycles_at_suspend = tkr->read(tkr->clock);
-	tkr_dummy.read = dummy_clock_read;
+	cycles_at_suspend = tk_clock_read(tkr);
+	tkr_dummy.clock = &dummy_clock;
 	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
 
 	tkr = &tk->tkr_raw;
 	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
-	tkr_dummy.read = dummy_clock_read;
+	tkr_dummy.clock = &dummy_clock;
 	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
 }
@@ -621,11 +643,10 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
  */
 static void timekeeping_forward_now(struct timekeeper *tk)
 {
-	struct clocksource *clock = tk->tkr_mono.clock;
 	cycle_t cycle_now, delta;
 	s64 nsec;
 
-	cycle_now = tk->tkr_mono.read(clock);
+	cycle_now = tk_clock_read(&tk->tkr_mono);
 	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 	tk->tkr_mono.cycle_last = cycle_now;
 	tk->tkr_raw.cycle_last  = cycle_now;
@@ -901,8 +922,7 @@ void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
-
-		now = tk->tkr_mono.read(tk->tkr_mono.clock);
+		now = tk_clock_read(&tk->tkr_mono);
 		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
 		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
 		base_real = ktime_add(tk->tkr_mono.base,
@@ -1081,7 +1101,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
 		 * Check whether the system counter value provided by the
 		 * device driver is on the current timekeeping interval.
 		 */
-		now = tk->tkr_mono.read(tk->tkr_mono.clock);
+		now = tk_clock_read(&tk->tkr_mono);
 		interval_start = tk->tkr_mono.cycle_last;
 		if (!cycle_between(interval_start, cycles, now)) {
 			clock_was_set_seq = tk->clock_was_set_seq;
@@ -1639,7 +1659,7 @@ void timekeeping_resume(void)
 	 * The less preferred source will only be tried if there is no better
 	 * usable source. The rtc part is handled separately in rtc core code.
 	 */
-	cycle_now = tk->tkr_mono.read(clock);
+	cycle_now = tk_clock_read(&tk->tkr_mono);
 	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
 		cycle_now > tk->tkr_mono.cycle_last) {
 		u64 num, max = ULLONG_MAX;
@@ -2057,7 +2077,7 @@ void update_wall_time(void)
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
 	offset = real_tk->cycle_interval;
 #else
-	offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
+	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
 				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 #endif