Commit 601633f4 authored by Dave Jones's avatar Dave Jones Committed by Dave Jones

[CPUFREQ] Handle CPU frequency changing behind our back.

Once we detected 50 consecutive ticks with lost ticks (and this is half of
the amount needed to trigger the fallback to a "sane" timesource), verify
the CPU frequency is in sync if cpufreq is used: sometimes the CPU frequency
changes behind the user's back, and then the TSC detects lost ticks. By a
call to cpufreq_get(), the frequency the TSC driver thinks the CPU is in
is updated to the actual frequency, in case the two differ. This works really
nicely on my notebook -- it never falls back to a different timesource now,
even when I plug in the power cord.
parent a150ef02
...@@ -27,6 +27,8 @@ static unsigned long hpet_last; ...@@ -27,6 +27,8 @@ static unsigned long hpet_last;
struct timer_opts timer_tsc; struct timer_opts timer_tsc;
#endif #endif
static inline void cpufreq_delayed_get(void);
int tsc_disable __initdata = 0; int tsc_disable __initdata = 0;
extern spinlock_t i8253_lock; extern spinlock_t i8253_lock;
...@@ -241,6 +243,9 @@ static void mark_offset_tsc(void) ...@@ -241,6 +243,9 @@ static void mark_offset_tsc(void)
clock_fallback(); clock_fallback();
} }
/* ... but give the TSC a fair chance */
if (lost_count == 50)
cpufreq_delayed_get();
} else } else
lost_count = 0; lost_count = 0;
/* update the monotonic base value */ /* update the monotonic base value */
...@@ -324,6 +329,29 @@ static void mark_offset_tsc_hpet(void) ...@@ -324,6 +329,29 @@ static void mark_offset_tsc_hpet(void)
#ifdef CONFIG_CPU_FREQ #ifdef CONFIG_CPU_FREQ
#include <linux/workqueue.h>
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;
static void handle_cpufreq_delayed_get(void *v)
{
unsigned int cpu;
for_each_online_cpu(cpu) {
cpufreq_get(cpu);
}
}
/*
 * Called from the lost-tick detection path: defer a cpufreq_get() to
 * process context so we can verify that the frequency the timing core
 * thinks the CPU is running at is still correct. No-op until the
 * workqueue item has been initialized by cpufreq_tsc().
 */
static inline void cpufreq_delayed_get(void)
{
	if (!cpufreq_init)
		return;
	schedule_work(&cpufreq_delayed_get_work);
}
/* If the CPU frequency is scaled, TSC-based delays will need a different /* If the CPU frequency is scaled, TSC-based delays will need a different
* loops_per_jiffy value to function properly. An exception to this * loops_per_jiffy value to function properly. An exception to this
* are modern Intel Pentium 4 processors, where the TSC runs at a constant * are modern Intel Pentium 4 processors, where the TSC runs at a constant
...@@ -383,6 +411,8 @@ static struct notifier_block time_cpufreq_notifier_block = { ...@@ -383,6 +411,8 @@ static struct notifier_block time_cpufreq_notifier_block = {
static int __init cpufreq_tsc(void) static int __init cpufreq_tsc(void)
{ {
INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
cpufreq_init = 1;
/* P4 and above CPU TSC freq doesn't change when CPU frequency changes*/ /* P4 and above CPU TSC freq doesn't change when CPU frequency changes*/
if ((boot_cpu_data.x86 >= 15) && (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) if ((boot_cpu_data.x86 >= 15) && (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL))
variable_tsc = 0; variable_tsc = 0;
...@@ -391,6 +421,8 @@ static int __init cpufreq_tsc(void) ...@@ -391,6 +421,8 @@ static int __init cpufreq_tsc(void)
} }
core_initcall(cpufreq_tsc); core_initcall(cpufreq_tsc);
#else /* CONFIG_CPU_FREQ */
static inline void cpufreq_delayed_get(void) { return; }
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment