Commit eb90d81d authored by Linus Torvalds

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-tip:
  x86: prevent PGE flush from interruption/preemption
  x86: use explicit copy in vdso_gettimeofday()
  namespacecheck: automated fixes
  x86/xen: fix arbitrary_virt_to_machine()
  x86: don't read maxlvt before checking if APIC is mapped
  x86: disable TSC for sched_clock() when calibration failed
  x86: distangle user disabled TSC from unstable
  x86: fix setup of cyc2ns in tsc_64.c
parents d3c5f8b9 b1979a5f
@@ -534,7 +534,7 @@ int setup_profiling_timer(unsigned int multiplier)
  */
 void clear_local_APIC(void)
 {
-	int maxlvt = lapic_get_maxlvt();
+	int maxlvt;
 	u32 v;

 	/* APIC hasn't been mapped yet */
...
@@ -53,7 +53,7 @@ static cycle_t kvm_clock_read(void);
  * have elapsed since the hypervisor wrote the data. So we try to account for
  * that with system time
  */
-unsigned long kvm_get_wallclock(void)
+static unsigned long kvm_get_wallclock(void)
 {
 	u32 wc_sec, wc_nsec;
 	u64 delta;
@@ -86,7 +86,7 @@ unsigned long kvm_get_wallclock(void)
 	return ts.tv_sec + 1;
 }

-int kvm_set_wallclock(unsigned long now)
+static int kvm_set_wallclock(unsigned long now)
 {
 	return 0;
 }
...
@@ -14,7 +14,7 @@
 #include "mach_timer.h"

-static int tsc_enabled;
+static int tsc_disabled;

 /*
  * On some systems the TSC frequency does not
@@ -29,7 +29,7 @@ static int __init tsc_setup(char *str)
 {
 	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
 			"cannot disable TSC completely.\n");
-	mark_tsc_unstable("user disabled TSC");
+	tsc_disabled = 1;
 	return 1;
 }
 #else
@@ -120,7 +120,7 @@ unsigned long long native_sched_clock(void)
 	 * very important for it to be as fast as the platform
 	 * can achive it. )
 	 */
-	if (unlikely(!tsc_enabled && !tsc_unstable))
+	if (unlikely(tsc_disabled))
 		/* No locking but a rare wrong value is not a big deal: */
 		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
@@ -322,7 +322,6 @@ void mark_tsc_unstable(char *reason)
 {
 	if (!tsc_unstable) {
 		tsc_unstable = 1;
-		tsc_enabled = 0;
 		printk("Marking TSC unstable due to: %s.\n", reason);
 		/* Can be called before registration */
 		if (clocksource_tsc.mult)
@@ -403,14 +402,22 @@ void __init tsc_init(void)
 {
 	int cpu;

-	if (!cpu_has_tsc)
+	if (!cpu_has_tsc || tsc_disabled) {
+		/* Disable the TSC in case of !cpu_has_tsc */
+		tsc_disabled = 1;
 		return;
+	}

 	cpu_khz = calculate_cpu_khz();
 	tsc_khz = cpu_khz;

 	if (!cpu_khz) {
 		mark_tsc_unstable("could not calculate TSC khz");
+		/*
+		 * We need to disable the TSC completely in this case
+		 * to prevent sched_clock() from using it.
+		 */
+		tsc_disabled = 1;
 		return;
 	}
@@ -441,8 +448,6 @@ void __init tsc_init(void)
 	if (check_tsc_unstable()) {
 		clocksource_tsc.rating = 0;
 		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
-	} else
-		tsc_enabled = 1;
+	}

 	clocksource_register(&clocksource_tsc);
 }
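
With tsc_disabled set, native_sched_clock() above falls back to jiffies, so timestamps advance only once per tick. A rough standalone illustration of that fallback arithmetic, in ordinary userspace C (HZ and the jiffies values below are made-up examples, not taken from the patch):

    /* Jiffies-based sched_clock() fallback, sketched outside the kernel. */
    #include <stdio.h>

    #define HZ            250UL                    /* assumed tick rate */
    #define NSEC_PER_TICK (1000000000UL / HZ)      /* 4,000,000 ns per jiffy */

    int main(void)
    {
        unsigned long long jiffies_64      = 100040;  /* hypothetical counter */
        unsigned long long initial_jiffies = 100000;  /* hypothetical start   */

        /* Same arithmetic as the fallback path in native_sched_clock(). */
        unsigned long long ns = (jiffies_64 - initial_jiffies) * NSEC_PER_TICK;

        printf("%llu ns since start, 4 ms granularity at HZ=250\n", ns);
        return 0;
    }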
@@ -227,14 +227,14 @@ void __init tsc_calibrate(void)
 	/* hpet or pmtimer available ? */
 	if (!hpet && !pm1 && !pm2) {
 		printk(KERN_INFO "TSC calibrated against PIT\n");
-		return;
+		goto out;
 	}

 	/* Check, whether the sampling was disturbed by an SMI */
 	if (tsc1 == ULONG_MAX || tsc2 == ULONG_MAX) {
 		printk(KERN_WARNING "TSC calibration disturbed by SMI, "
 				"using PIT calibration result\n");
-		return;
+		goto out;
 	}

 	tsc2 = (tsc2 - tsc1) * 1000000L;
@@ -255,6 +255,7 @@ void __init tsc_calibrate(void)

 	tsc_khz = tsc2 / tsc1;

+out:
 	for_each_possible_cpu(cpu)
 		set_cyc2ns_scale(tsc_khz, cpu);
 }
...
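
The goto-out change makes sure set_cyc2ns_scale() also runs when calibration bails out and keeps the PIT result, so sched_clock() always gets a usable scale. A minimal sketch of that kind of fixed-point cycles-to-nanoseconds conversion (the 10-bit scale factor is assumed from the 2.6-era code, the 2 GHz frequency is made up, and the helper names are illustrative):

    #include <stdio.h>

    #define CYC2NS_SCALE_FACTOR 10          /* assumed fixed-point shift */

    static unsigned long cyc2ns_scale;

    /* ns per cycle, scaled up by 2^CYC2NS_SCALE_FACTOR to stay integer. */
    static void set_scale(unsigned long khz)
    {
        cyc2ns_scale = (1000000UL << CYC2NS_SCALE_FACTOR) / khz;
    }

    static unsigned long long cycles_to_ns(unsigned long long cyc)
    {
        return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
    }

    int main(void)
    {
        set_scale(2000000UL);                          /* pretend a 2.0 GHz TSC */
        printf("%llu ns\n", cycles_to_ns(2000000ULL)); /* 2e6 cycles ~= 1 ms    */
        return 0;
    }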
@@ -1996,7 +1996,7 @@ static struct shrinker mmu_shrinker = {
 	.seeks = DEFAULT_SEEKS * 10,
 };

-void mmu_destroy_caches(void)
+static void mmu_destroy_caches(void)
 {
 	if (pte_chain_cache)
 		kmem_cache_destroy(pte_chain_cache);
...
@@ -106,9 +106,9 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 		do_realtime((struct timespec *)tv);
 		tv->tv_usec /= 1000;
 		if (unlikely(tz != NULL)) {
-			/* This relies on gcc inlining the memcpy. We'll notice
-			   if it ever fails to do so. */
-			memcpy(tz, &gtod->sys_tz, sizeof(struct timezone));
+			/* Avoid memcpy. Some old compilers fail to inline it */
+			tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
+			tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
 		}
 		return 0;
 	}
...
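
The vDSO executes in user space, so a memcpy() that the compiler does not inline turns into a call the vDSO cannot resolve; copying the two timezone fields by hand avoids that entirely. A tiny userspace sketch of the idea (the struct and values are illustrative, not the real vsyscall data):

    #include <stdio.h>

    struct tz { int tz_minuteswest; int tz_dsttime; };

    /* Field-by-field copy: plain loads and stores, no library call emitted. */
    static void copy_tz(struct tz *dst, const struct tz *src)
    {
        dst->tz_minuteswest = src->tz_minuteswest;
        dst->tz_dsttime     = src->tz_dsttime;
    }

    int main(void)
    {
        struct tz src = { -60, 0 };
        struct tz dst;

        copy_tz(&dst, &src);
        printf("%d %d\n", dst.tz_minuteswest, dst.tz_dsttime);
        return 0;
    }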
@@ -60,7 +60,7 @@ xmaddr_t arbitrary_virt_to_machine(unsigned long address)
 {
 	unsigned int level;
 	pte_t *pte = lookup_address(address, &level);
-	unsigned offset = address & PAGE_MASK;
+	unsigned offset = address & ~PAGE_MASK;

 	BUG_ON(pte == NULL);
...
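
The one-character change above works because PAGE_MASK keeps the page-aligned high bits of an address, so the byte offset within the page is address & ~PAGE_MASK. A small illustration (assumes 4 KiB pages and a made-up address):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long address = 0x12345678UL;

        printf("page base: 0x%lx\n", address & PAGE_MASK);   /* 0x12345000 */
        printf("offset:    0x%lx\n", address & ~PAGE_MASK);  /* 0x678      */
        return 0;
    }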
@@ -22,12 +22,23 @@ static inline void __native_flush_tlb(void)

 static inline void __native_flush_tlb_global(void)
 {
-	unsigned long cr4 = read_cr4();
+	unsigned long flags;
+	unsigned long cr4;

+	/*
+	 * Read-modify-write to CR4 - protect it from preemption and
+	 * from interrupts. (Use the raw variant because this code can
+	 * be called from deep inside debugging code.)
+	 */
+	raw_local_irq_save(flags);
+
+	cr4 = read_cr4();
 	/* clear PGE */
 	write_cr4(cr4 & ~X86_CR4_PGE);
 	/* write old PGE again and flush TLBs */
 	write_cr4(cr4);
+
+	raw_local_irq_restore(flags);
 }

 static inline void __native_flush_tlb_single(unsigned long addr)
...
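
Clearing and restoring CR4.PGE is a read-modify-write of a register that interrupt and preemption paths may also touch; if something interleaves between the read and the final write, the restore can push back a stale value. A schematic stand-alone sketch of the protected pattern (fake_cr4, irq_save() and irq_restore() are stand-ins for the real register and for raw_local_irq_save()/raw_local_irq_restore(), not kernel APIs):

    #include <stdio.h>

    #define X86_CR4_PGE (1UL << 7)               /* PGE is bit 7 of CR4 */

    static unsigned long fake_cr4 = 0x000006f0;  /* made-up CR4 value, PGE set */
    static int irqs_on = 1;

    /* Stand-ins for raw_local_irq_save()/raw_local_irq_restore(). */
    static unsigned long irq_save(void)      { unsigned long f = irqs_on; irqs_on = 0; return f; }
    static void irq_restore(unsigned long f) { irqs_on = (int)f; }

    static void flush_tlb_global(void)
    {
        unsigned long flags, cr4;

        flags = irq_save();             /* nothing may interleave from here */
        cr4 = fake_cr4;                 /* read                             */
        fake_cr4 = cr4 & ~X86_CR4_PGE;  /* clear PGE: flushes global TLBs   */
        fake_cr4 = cr4;                 /* write the old value back         */
        irq_restore(flags);             /* interrupts may run again         */
    }

    int main(void)
    {
        flush_tlb_global();
        printf("cr4 = 0x%lx\n", fake_cr4);  /* unchanged: 0x6f0 */
        return 0;
    }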