Commit 8553f321 authored by Linus Torvalds

Merge branch 'timers-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'timers-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  timers: fix build error in !oneshot case
  x86: c1e_idle: don't mark TSC unstable if CPU has invariant TSC
  x86: prevent C-states hang on AMD C1E enabled machines
  clockevents: prevent mode mismatch on cpu online
  clockevents: check broadcast device not tick device
  clockevents: prevent stale tick_next_period for onlining CPUs
  x86: prevent stale state of c1e_mask across CPU offline/online
  clockevents: prevent cpu online to interfere with nohz
parents be3be890 f8e256c6
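
Note for readers skimming the diff below: the series mixes two independent fixes, the AMD C1E handling (c1e_mask is moved out of c1e_idle() so offlined CPUs can be removed from it) and the split of the tick_do_timer_cpu sentinel into TICK_DO_TIMER_BOOT/TICK_DO_TIMER_NONE. As a rough orientation, here is a minimal, self-contained sketch of the sentinel idea; the helpers setup_tick() and tick_handler() are invented for illustration and are not part of this commit:

#include <stdio.h>

/* Sentinels as introduced in tick-internal.h below. */
#define TICK_DO_TIMER_NONE	-1	/* duty dropped, a tick handler may take it over */
#define TICK_DO_TIMER_BOOT	-2	/* nobody has claimed the duty yet during boot   */

static int tick_do_timer_cpu = TICK_DO_TIMER_BOOT;

/* Runs when a CPU sets up its tick device (boot or later online). */
static void setup_tick(int cpu)
{
	/* Only the boot-time sentinel lets a CPU claim the do_timer duty here,
	 * so a CPU coming online later cannot steal it from a nohz CPU. */
	if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT)
		tick_do_timer_cpu = cpu;
}

/* Runs from the periodic/nohz tick handler. */
static void tick_handler(int cpu)
{
	/* Take over only after the previous owner explicitly dropped the duty. */
	if (tick_do_timer_cpu == TICK_DO_TIMER_NONE)
		tick_do_timer_cpu = cpu;
}

int main(void)
{
	setup_tick(0);				/* boot CPU claims the duty */
	setup_tick(1);				/* onlined CPU must not take it over */
	printf("do_timer cpu: %d\n", tick_do_timer_cpu);	/* prints 0 */

	tick_do_timer_cpu = TICK_DO_TIMER_NONE;	/* owner drops the duty (idle/offline) */
	tick_handler(1);
	printf("do_timer cpu: %d\n", tick_do_timer_cpu);	/* prints 1 */
	return 0;
}
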
@@ -246,6 +246,14 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
 	return 1;
 }
 
+static cpumask_t c1e_mask = CPU_MASK_NONE;
+static int c1e_detected;
+
+void c1e_remove_cpu(int cpu)
+{
+	cpu_clear(cpu, c1e_mask);
+}
+
 /*
  * C1E aware idle routine. We check for C1E active in the interrupt
  * pending message MSR. If we detect C1E, then we handle it the same
@@ -253,9 +261,6 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
  */
 static void c1e_idle(void)
 {
-	static cpumask_t c1e_mask = CPU_MASK_NONE;
-	static int c1e_detected;
-
 	if (need_resched())
 		return;
@@ -265,8 +270,10 @@ static void c1e_idle(void)
 		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
 		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
 			c1e_detected = 1;
-			mark_tsc_unstable("TSC halt in C1E");
-			printk(KERN_INFO "System has C1E enabled\n");
+			if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+				mark_tsc_unstable("TSC halt in AMD C1E");
+			printk(KERN_INFO "System has AMD C1E enabled\n");
+			set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
 		}
 	}
...
@@ -88,6 +88,7 @@ static void cpu_exit_clear(void)
 	cpu_clear(cpu, cpu_callin_map);
 
 	numa_remove_cpu(cpu);
+	c1e_remove_cpu(cpu);
 }
 
 /* We don't actually take CPU down, just spin without interrupts. */
...
@@ -93,6 +93,8 @@ DECLARE_PER_CPU(int, cpu_state);
 static inline void play_dead(void)
 {
 	idle_task_exit();
+	c1e_remove_cpu(raw_smp_processor_id());
+
 	mb();
 	/* Ack it */
 	__get_cpu_var(cpu_state) = CPU_DEAD;
...
@@ -140,6 +140,8 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
 	    boot_cpu_data.x86_model <= 0x05 &&
 	    boot_cpu_data.x86_mask < 0x0A)
 		return 1;
+	else if (boot_cpu_has(X86_FEATURE_AMDC1E))
+		return 1;
 	else
 		return max_cstate;
 }
...
@@ -81,6 +81,7 @@
 #define X86_FEATURE_LFENCE_RDTSC	(3*32+18) /* Lfence synchronizes RDTSC */
 #define X86_FEATURE_11AP		(3*32+19) /* Bad local APIC aka 11AP */
 #define X86_FEATURE_NOPL		(3*32+20) /* The NOPL (0F 1F) instructions */
+#define X86_FEATURE_AMDC1E		(3*32+21) /* AMD C1E detected */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3		(4*32+ 0) /* Streaming SIMD Extensions-3 */
...
@@ -10,4 +10,6 @@ void idle_notifier_register(struct notifier_block *n);
 void enter_idle(void);
 void exit_idle(void);
 
+void c1e_remove_cpu(int cpu);
+
 #endif
@@ -235,7 +235,7 @@ static void tick_do_broadcast_on_off(void *why)
 	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
 		if (!cpu_isset(cpu, tick_broadcast_mask)) {
 			cpu_set(cpu, tick_broadcast_mask);
-			if (td->mode == TICKDEV_MODE_PERIODIC)
+			if (bc->mode == TICKDEV_MODE_PERIODIC)
 				clockevents_shutdown(dev);
 		}
 		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
@@ -245,7 +245,7 @@ static void tick_do_broadcast_on_off(void *why)
 		if (!tick_broadcast_force &&
 		    cpu_isset(cpu, tick_broadcast_mask)) {
 			cpu_clear(cpu, tick_broadcast_mask);
-			if (td->mode == TICKDEV_MODE_PERIODIC)
+			if (bc->mode == TICKDEV_MODE_PERIODIC)
 				tick_setup_periodic(dev, 0);
 		}
 		break;
@@ -575,4 +575,12 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
 	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
+/*
+ * Check, whether the broadcast device is in one shot mode
+ */
+int tick_broadcast_oneshot_active(void)
+{
+	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
+}
+
 #endif
@@ -33,7 +33,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
  */
 ktime_t tick_next_period;
 ktime_t tick_period;
-int tick_do_timer_cpu __read_mostly = -1;
+int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
 DEFINE_SPINLOCK(tick_device_lock);
 
 /*
@@ -109,7 +109,8 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
 	if (!tick_device_is_functional(dev))
 		return;
 
-	if (dev->features & CLOCK_EVT_FEAT_PERIODIC) {
+	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
+	    !tick_broadcast_oneshot_active()) {
 		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
 	} else {
 		unsigned long seq;
@@ -148,7 +149,7 @@ static void tick_setup_device(struct tick_device *td,
 	 * If no cpu took the do_timer update, assign it to
 	 * this cpu:
 	 */
-	if (tick_do_timer_cpu == -1) {
+	if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
 		tick_do_timer_cpu = cpu;
 		tick_next_period = ktime_get();
 		tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
@@ -300,7 +301,8 @@ static void tick_shutdown(unsigned int *cpup)
 	if (*cpup == tick_do_timer_cpu) {
 		int cpu = first_cpu(cpu_online_map);
 
-		tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1;
+		tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu :
+			TICK_DO_TIMER_NONE;
 	}
 	spin_unlock_irqrestore(&tick_device_lock, flags);
 }
...
 /*
  * tick internal variable and functions used by low/high res code
  */
+#define TICK_DO_TIMER_NONE	-1
+#define TICK_DO_TIMER_BOOT	-2
+
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 extern spinlock_t tick_device_lock;
 extern ktime_t tick_next_period;
@@ -31,6 +35,7 @@ extern void tick_broadcast_oneshot_control(unsigned long reason);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
+extern int tick_broadcast_oneshot_active(void);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -39,6 +44,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
+static inline int tick_broadcast_oneshot_active(void) { return 0; }
 # endif /* !BROADCAST */
 
 #else /* !ONESHOT */
@@ -68,6 +74,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 {
 	return 0;
 }
+static inline int tick_broadcast_oneshot_active(void) { return 0; }
 #endif /* !TICK_ONESHOT */
 
 /*
...
@@ -75,6 +75,9 @@ static void tick_do_update_jiffies64(ktime_t now)
 				 incr * ticks);
 		}
 		do_timer(++ticks);
+
+		/* Keep the tick_next_period variable up to date */
+		tick_next_period = ktime_add(last_jiffies_update, tick_period);
 	}
 	write_sequnlock(&xtime_lock);
 }
@@ -221,7 +224,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	 */
 	if (unlikely(!cpu_online(cpu))) {
 		if (cpu == tick_do_timer_cpu)
-			tick_do_timer_cpu = -1;
+			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 	}
 
 	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
@@ -303,7 +306,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 		 * invoked.
 		 */
 		if (cpu == tick_do_timer_cpu)
-			tick_do_timer_cpu = -1;
+			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 
 		ts->idle_sleeps++;
@@ -468,7 +471,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 	 * this duty, then the jiffies update is still serialized by
 	 * xtime_lock.
 	 */
-	if (unlikely(tick_do_timer_cpu == -1))
+	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
 		tick_do_timer_cpu = cpu;
 
 	/* Check, if the jiffies need an update */
@@ -570,7 +573,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 	 * this duty, then the jiffies update is still serialized by
 	 * xtime_lock.
 	 */
-	if (unlikely(tick_do_timer_cpu == -1))
+	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
 		tick_do_timer_cpu = cpu;
 #endif
...