Commit a8006bd9 authored by Linus Torvalds

Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer fixes from Ingo Molnar:
 "Fix four timer locking races: two were noticed by Linus while
  reviewing the code while chasing for a corruption bug, and two
  from fixing spurious USB timeouts"

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  timers: Prevent base clock corruption when forwarding
  timers: Prevent base clock rewind when forwarding clock
  timers: Lock base for same bucket optimization
  timers: Plug locking race vs. timer migration
parents 965c4b7e 6bad6bcc
@@ -878,7 +878,7 @@ static inline struct timer_base *get_timer_base(u32 tflags)
 
 #ifdef CONFIG_NO_HZ_COMMON
 static inline struct timer_base *
-__get_target_base(struct timer_base *base, unsigned tflags)
+get_target_base(struct timer_base *base, unsigned tflags)
 {
 #ifdef CONFIG_SMP
 	if ((tflags & TIMER_PINNED) || !base->migration_enabled)
@@ -891,25 +891,27 @@ __get_target_base(struct timer_base *base, unsigned tflags)
 static inline void forward_timer_base(struct timer_base *base)
 {
+	unsigned long jnow = READ_ONCE(jiffies);
+
 	/*
 	 * We only forward the base when it's idle and we have a delta between
 	 * base clock and jiffies.
 	 */
-	if (!base->is_idle || (long) (jiffies - base->clk) < 2)
+	if (!base->is_idle || (long) (jnow - base->clk) < 2)
 		return;
 
 	/*
 	 * If the next expiry value is > jiffies, then we fast forward to
 	 * jiffies otherwise we forward to the next expiry value.
 	 */
-	if (time_after(base->next_expiry, jiffies))
-		base->clk = jiffies;
+	if (time_after(base->next_expiry, jnow))
+		base->clk = jnow;
 	else
 		base->clk = base->next_expiry;
 }
 
 #else
 static inline struct timer_base *
-__get_target_base(struct timer_base *base, unsigned tflags)
+get_target_base(struct timer_base *base, unsigned tflags)
 {
 	return get_timer_this_cpu_base(tflags);
 }
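The hunk above makes forward_timer_base() work from a single READ_ONCE() snapshot of jiffies rather than reading the live counter several times. As a rough illustration only, here is a standalone C sketch of that rule; it is not kernel code, and every demo_* name is invented for the example:

#include <stdbool.h>
#include <stdio.h>

/* Wrap-safe "a is after b", modelled on the kernel's time_after(). */
#define demo_time_after(a, b)	((long)((b) - (a)) < 0)

struct demo_base {
	unsigned long clk;		/* wheel clock, in ticks */
	unsigned long next_expiry;	/* earliest queued expiry, in ticks */
	bool is_idle;
};

/* @jnow is a single snapshot of the tick counter taken by the caller. */
static void demo_forward_base(struct demo_base *base, unsigned long jnow)
{
	/* Only forward an idle base with at least two ticks of slack. */
	if (!base->is_idle || (long)(jnow - base->clk) < 2)
		return;

	/* Never move the clock past the snapshot taken above. */
	if (demo_time_after(base->next_expiry, jnow))
		base->clk = jnow;
	else
		base->clk = base->next_expiry;
}

int main(void)
{
	struct demo_base b = { .clk = 100, .next_expiry = 150, .is_idle = true };

	demo_forward_base(&b, 110);	/* next_expiry is after jnow: clk becomes 110 */
	printf("clk = %lu\n", b.clk);
	return 0;
}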
@@ -917,14 +919,6 @@ __get_target_base(struct timer_base *base, unsigned tflags)
 static inline void forward_timer_base(struct timer_base *base) { }
 #endif
 
-static inline struct timer_base *
-get_target_base(struct timer_base *base, unsigned tflags)
-{
-	struct timer_base *target = __get_target_base(base, tflags);
-
-	forward_timer_base(target);
-	return target;
-}
 
 /*
  * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
@@ -943,7 +937,14 @@ static struct timer_base *lock_timer_base(struct timer_list *timer,
 {
 	for (;;) {
 		struct timer_base *base;
-		u32 tf = timer->flags;
+		u32 tf;
+
+		/*
+		 * We need to use READ_ONCE() here, otherwise the compiler
+		 * might re-read @tf between the check for TIMER_MIGRATING
+		 * and spin_lock().
+		 */
+		tf = READ_ONCE(timer->flags);
 
 		if (!(tf & TIMER_MIGRATING)) {
 			base = get_timer_base(tf);
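The comment added above is the whole point of the hunk: the flags word must be read exactly once before the TIMER_MIGRATING test. A standalone sketch of that snapshot pattern follows; it is illustrative only, the demo_* names and the flag value are made up, and demo_read_once() merely imitates what the kernel's READ_ONCE() does:

#include <stdint.h>
#include <stdio.h>

/* Volatile access forces exactly one load, imitating the kernel's READ_ONCE(). */
#define demo_read_once(x)	(*(const volatile __typeof__(x) *)&(x))

#define DEMO_TIMER_MIGRATING	0x00040000u	/* arbitrary flag bit for the sketch */

struct demo_timer {
	uint32_t flags;		/* written concurrently by a migration path */
};

static int demo_flags_stable(const struct demo_timer *t)
{
	/*
	 * One snapshot: without the volatile read the compiler may reload
	 * t->flags after the MIGRATING test and act on a different value.
	 */
	uint32_t tf = demo_read_once(t->flags);

	if (tf & DEMO_TIMER_MIGRATING)
		return 0;	/* caller would retry, as lock_timer_base() does */

	/* From here on only the snapshot tf is used. */
	return 1;
}

int main(void)
{
	struct demo_timer t = { .flags = 0 };

	printf("stable: %d\n", demo_flags_stable(&t));
	return 0;
}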
@@ -964,6 +965,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 	unsigned long clk = 0, flags;
 	int ret = 0;
 
+	BUG_ON(!timer->function);
+
 	/*
 	 * This is a common optimization triggered by the networking code - if
 	 * the timer is re-modified to have the same timeout or ends up in the
@@ -972,13 +975,16 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 	if (timer_pending(timer)) {
 		if (timer->expires == expires)
 			return 1;
+
 		/*
-		 * Take the current timer_jiffies of base, but without holding
-		 * the lock!
+		 * We lock timer base and calculate the bucket index right
+		 * here. If the timer ends up in the same bucket, then we
+		 * just update the expiry time and avoid the whole
+		 * dequeue/enqueue dance.
 		 */
-		base = get_timer_base(timer->flags);
-		clk = base->clk;
+		base = lock_timer_base(timer, &flags);
 
+		clk = base->clk;
 		idx = calc_wheel_index(expires, clk);
 
 		/*
@@ -988,14 +994,14 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 		 */
 		if (idx == timer_get_idx(timer)) {
 			timer->expires = expires;
-			return 1;
+			ret = 1;
+			goto out_unlock;
 		}
+	} else {
+		base = lock_timer_base(timer, &flags);
 	}
 
 	timer_stats_timer_set_start_info(timer);
-	BUG_ON(!timer->function);
-
-	base = lock_timer_base(timer, &flags);
 
 	ret = detach_if_pending(timer, base, false);
 	if (!ret && pending_only)
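The two hunks above move the same-bucket optimization under the base lock: the bucket index is computed while the lock is held, and only a matching index allows the in-place expiry update. A rough standalone sketch of that decision follows; the demo_* names are hypothetical and demo_wheel_index() is a simplified stand-in, not the kernel's calc_wheel_index():

#include <stdio.h>

struct demo_timer {
	unsigned long expires;
	unsigned int idx;	/* bucket the pending timer currently hangs in */
};

/* Hypothetical stand-in for calc_wheel_index(): bucket by expiry delta. */
static unsigned int demo_wheel_index(unsigned long expires, unsigned long clk)
{
	return (unsigned int)((expires - clk) & 63);
}

/*
 * Returns 1 when the timer stays in its bucket, so only the expiry field
 * needs updating; 0 means the caller must do the full dequeue/enqueue.
 * In the kernel this check now runs with the base lock held.
 */
static int demo_try_same_bucket(struct demo_timer *t, unsigned long clk,
				unsigned long expires)
{
	unsigned int idx = demo_wheel_index(expires, clk);

	if (idx != t->idx)
		return 0;

	t->expires = expires;
	return 1;
}

int main(void)
{
	struct demo_timer t = { .expires = 112, .idx = 12 };

	/* 116 - 100 = 16: different bucket, a full requeue would be needed. */
	printf("same bucket: %d\n", demo_try_same_bucket(&t, 100, 116));
	return 0;
}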
@@ -1025,12 +1031,16 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 		}
 	}
 
+	/* Try to forward a stale timer base clock */
+	forward_timer_base(base);
+
 	timer->expires = expires;
 	/*
 	 * If 'idx' was calculated above and the base time did not advance
-	 * between calculating 'idx' and taking the lock, only enqueue_timer()
-	 * and trigger_dyntick_cpu() is required. Otherwise we need to
-	 * (re)calculate the wheel index via internal_add_timer().
+	 * between calculating 'idx' and possibly switching the base, only
+	 * enqueue_timer() and trigger_dyntick_cpu() is required. Otherwise
+	 * we need to (re)calculate the wheel index via
+	 * internal_add_timer().
 	 */
	if (idx != UINT_MAX && clk == base->clk) {
		enqueue_timer(base, timer, idx);
@@ -1510,12 +1520,16 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 	is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
 	base->next_expiry = nextevt;
 	/*
-	 * We have a fresh next event. Check whether we can forward the base:
+	 * We have a fresh next event. Check whether we can forward the
+	 * base. We can only do that when @basej is past base->clk
+	 * otherwise we might rewind base->clk.
 	 */
-	if (time_after(nextevt, jiffies))
-		base->clk = jiffies;
-	else if (time_after(nextevt, base->clk))
-		base->clk = nextevt;
+	if (time_after(basej, base->clk)) {
+		if (time_after(nextevt, basej))
+			base->clk = basej;
+		else if (time_after(nextevt, base->clk))
+			base->clk = nextevt;
+	}
 
 	if (time_before_eq(nextevt, basej)) {
 		expires = basem;
...
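The last hunk guards get_next_timer_interrupt() against rewinding base->clk: forwarding only happens when the caller's jiffies snapshot (basej) is already past the base clock. A standalone sketch of that guard, with invented demo_* names, might look like:

#include <stdio.h>

/* Wrap-safe "a is after b", modelled on the kernel's time_after(). */
#define demo_time_after(a, b)	((long)((b) - (a)) < 0)

struct demo_base {
	unsigned long clk;	/* wheel clock, in ticks */
};

/*
 * Forward the base clock for a freshly computed next event, but only when
 * @basej (the caller's tick snapshot) is already past base->clk, so the
 * clock can never move backwards.
 */
static void demo_forward_on_next_event(struct demo_base *base,
				       unsigned long basej,
				       unsigned long nextevt)
{
	if (!demo_time_after(basej, base->clk))
		return;		/* basej <= clk: forwarding could rewind */

	if (demo_time_after(nextevt, basej))
		base->clk = basej;
	else if (demo_time_after(nextevt, base->clk))
		base->clk = nextevt;
}

int main(void)
{
	struct demo_base b = { .clk = 205 };

	/* basej (200) is not past clk (205): the clock is left alone. */
	demo_forward_on_next_event(&b, 200, 260);
	printf("clk = %lu\n", b.clk);
	return 0;
}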