Commit 60916285 authored by Ingo Molnar's avatar Ingo Molnar

Merge branches 'x86/asm', 'x86/cleanups' and 'x86/headers' into x86/core

...@@ -25,14 +25,12 @@ ...@@ -25,14 +25,12 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/page.h> #include <asm/page_types.h>
#include <asm/boot.h> #include <asm/boot.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
.section ".text.head","ax",@progbits .section ".text.head","ax",@progbits
.globl startup_32 ENTRY(startup_32)
startup_32:
cld cld
/* test KEEP_SEGMENTS flag to see if the bootloader is asking /* test KEEP_SEGMENTS flag to see if the bootloader is asking
* us to not reload segments */ * us to not reload segments */
...@@ -113,6 +111,8 @@ startup_32: ...@@ -113,6 +111,8 @@ startup_32:
*/ */
leal relocated(%ebx), %eax leal relocated(%ebx), %eax
jmp *%eax jmp *%eax
ENDPROC(startup_32)
.section ".text" .section ".text"
relocated: relocated:
......
...@@ -26,8 +26,8 @@ ...@@ -26,8 +26,8 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/pgtable.h> #include <asm/pgtable_types.h>
#include <asm/page.h> #include <asm/page_types.h>
#include <asm/boot.h> #include <asm/boot.h>
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/processor-flags.h> #include <asm/processor-flags.h>
...@@ -35,9 +35,7 @@ ...@@ -35,9 +35,7 @@
.section ".text.head" .section ".text.head"
.code32 .code32
.globl startup_32 ENTRY(startup_32)
startup_32:
cld cld
/* test KEEP_SEGMENTS flag to see if the bootloader is asking /* test KEEP_SEGMENTS flag to see if the bootloader is asking
* us to not reload segments */ * us to not reload segments */
...@@ -176,6 +174,7 @@ startup_32: ...@@ -176,6 +174,7 @@ startup_32:
/* Jump from 32bit compatibility mode into 64bit mode. */ /* Jump from 32bit compatibility mode into 64bit mode. */
lret lret
ENDPROC(startup_32)
no_longmode: no_longmode:
/* This isn't an x86-64 CPU so hang */ /* This isn't an x86-64 CPU so hang */
...@@ -295,7 +294,6 @@ relocated: ...@@ -295,7 +294,6 @@ relocated:
call decompress_kernel call decompress_kernel
popq %rsi popq %rsi
/* /*
* Jump to the decompressed kernel. * Jump to the decompressed kernel.
*/ */
......
...@@ -8,6 +8,8 @@ ...@@ -8,6 +8,8 @@
* *
* ----------------------------------------------------------------------- */ * ----------------------------------------------------------------------- */
#include <linux/linkage.h>
/* /*
* Memory copy routines * Memory copy routines
*/ */
...@@ -15,9 +17,7 @@ ...@@ -15,9 +17,7 @@
.code16gcc .code16gcc
.text .text
.globl memcpy GLOBAL(memcpy)
.type memcpy, @function
memcpy:
pushw %si pushw %si
pushw %di pushw %di
movw %ax, %di movw %ax, %di
...@@ -31,11 +31,9 @@ memcpy: ...@@ -31,11 +31,9 @@ memcpy:
popw %di popw %di
popw %si popw %si
ret ret
.size memcpy, .-memcpy ENDPROC(memcpy)
.globl memset GLOBAL(memset)
.type memset, @function
memset:
pushw %di pushw %di
movw %ax, %di movw %ax, %di
movzbl %dl, %eax movzbl %dl, %eax
...@@ -48,52 +46,42 @@ memset: ...@@ -48,52 +46,42 @@ memset:
rep; stosb rep; stosb
popw %di popw %di
ret ret
.size memset, .-memset ENDPROC(memset)
.globl copy_from_fs GLOBAL(copy_from_fs)
.type copy_from_fs, @function
copy_from_fs:
pushw %ds pushw %ds
pushw %fs pushw %fs
popw %ds popw %ds
call memcpy call memcpy
popw %ds popw %ds
ret ret
.size copy_from_fs, .-copy_from_fs ENDPROC(copy_from_fs)
.globl copy_to_fs GLOBAL(copy_to_fs)
.type copy_to_fs, @function
copy_to_fs:
pushw %es pushw %es
pushw %fs pushw %fs
popw %es popw %es
call memcpy call memcpy
popw %es popw %es
ret ret
.size copy_to_fs, .-copy_to_fs ENDPROC(copy_to_fs)
#if 0 /* Not currently used, but can be enabled as needed */ #if 0 /* Not currently used, but can be enabled as needed */
GLOBAL(copy_from_gs)
.globl copy_from_gs
.type copy_from_gs, @function
copy_from_gs:
pushw %ds pushw %ds
pushw %gs pushw %gs
popw %ds popw %ds
call memcpy call memcpy
popw %ds popw %ds
ret ret
.size copy_from_gs, .-copy_from_gs ENDPROC(copy_from_gs)
.globl copy_to_gs
.type copy_to_gs, @function GLOBAL(copy_to_gs)
copy_to_gs:
pushw %es pushw %es
pushw %gs pushw %gs
popw %es popw %es
call memcpy call memcpy
popw %es popw %es
ret ret
.size copy_to_gs, .-copy_to_gs ENDPROC(copy_to_gs)
#endif #endif
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
#include <linux/utsrelease.h> #include <linux/utsrelease.h>
#include <asm/boot.h> #include <asm/boot.h>
#include <asm/e820.h> #include <asm/e820.h>
#include <asm/page.h> #include <asm/page_types.h>
#include <asm/setup.h> #include <asm/setup.h>
#include "boot.h" #include "boot.h"
#include "offsets.h" #include "offsets.h"
......
...@@ -15,18 +15,15 @@ ...@@ -15,18 +15,15 @@
#include <asm/boot.h> #include <asm/boot.h>
#include <asm/processor-flags.h> #include <asm/processor-flags.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <linux/linkage.h>
.text .text
.globl protected_mode_jump
.type protected_mode_jump, @function
.code16 .code16
/* /*
* void protected_mode_jump(u32 entrypoint, u32 bootparams); * void protected_mode_jump(u32 entrypoint, u32 bootparams);
*/ */
protected_mode_jump: GLOBAL(protected_mode_jump)
movl %edx, %esi # Pointer to boot_params table movl %edx, %esi # Pointer to boot_params table
xorl %ebx, %ebx xorl %ebx, %ebx
...@@ -47,12 +44,10 @@ protected_mode_jump: ...@@ -47,12 +44,10 @@ protected_mode_jump:
.byte 0x66, 0xea # ljmpl opcode .byte 0x66, 0xea # ljmpl opcode
2: .long in_pm32 # offset 2: .long in_pm32 # offset
.word __BOOT_CS # segment .word __BOOT_CS # segment
ENDPROC(protected_mode_jump)
.size protected_mode_jump, .-protected_mode_jump
.code32 .code32
.type in_pm32, @function GLOBAL(in_pm32)
in_pm32:
# Set up data segments for flat 32-bit mode # Set up data segments for flat 32-bit mode
movl %ecx, %ds movl %ecx, %ds
movl %ecx, %es movl %ecx, %es
...@@ -78,5 +73,4 @@ in_pm32: ...@@ -78,5 +73,4 @@ in_pm32:
lldt %cx lldt %cx
jmpl *%eax # Jump to the 32-bit entrypoint jmpl *%eax # Jump to the 32-bit entrypoint
ENDPROC(in_pm32)
.size in_pm32, .-in_pm32
...@@ -124,9 +124,14 @@ static inline void *phys_to_virt(phys_addr_t address) ...@@ -124,9 +124,14 @@ static inline void *phys_to_virt(phys_addr_t address)
/* /*
* ISA I/O bus memory addresses are 1:1 with the physical address. * ISA I/O bus memory addresses are 1:1 with the physical address.
* However, we truncate the address to unsigned int to avoid undesirable
 * promotions in legacy drivers.
*/ */
#define isa_virt_to_bus (unsigned long)virt_to_phys static inline unsigned int isa_virt_to_bus(volatile void *address)
#define isa_page_to_bus page_to_phys {
return (unsigned int)virt_to_phys(address);
}
#define isa_page_to_bus(page) ((unsigned int)page_to_phys(page))
#define isa_bus_to_virt phys_to_virt #define isa_bus_to_virt phys_to_virt
/* /*
......
...@@ -52,70 +52,14 @@ ...@@ -52,70 +52,14 @@
#endif #endif
#define GLOBAL(name) \
.globl name; \
name:
#ifdef CONFIG_X86_ALIGNMENT_16 #ifdef CONFIG_X86_ALIGNMENT_16
#define __ALIGN .align 16,0x90 #define __ALIGN .align 16,0x90
#define __ALIGN_STR ".align 16,0x90" #define __ALIGN_STR ".align 16,0x90"
#endif #endif
/*
* to check ENTRY_X86/END_X86 and
* KPROBE_ENTRY_X86/KPROBE_END_X86
* unbalanced-missed-mixed appearance
*/
#define __set_entry_x86 .set ENTRY_X86_IN, 0
#define __unset_entry_x86 .set ENTRY_X86_IN, 1
#define __set_kprobe_x86 .set KPROBE_X86_IN, 0
#define __unset_kprobe_x86 .set KPROBE_X86_IN, 1
#define __macro_err_x86 .error "ENTRY_X86/KPROBE_X86 unbalanced,missed,mixed"
#define __check_entry_x86 \
.ifdef ENTRY_X86_IN; \
.ifeq ENTRY_X86_IN; \
__macro_err_x86; \
.abort; \
.endif; \
.endif
#define __check_kprobe_x86 \
.ifdef KPROBE_X86_IN; \
.ifeq KPROBE_X86_IN; \
__macro_err_x86; \
.abort; \
.endif; \
.endif
#define __check_entry_kprobe_x86 \
__check_entry_x86; \
__check_kprobe_x86
#define ENTRY_KPROBE_FINAL_X86 __check_entry_kprobe_x86
#define ENTRY_X86(name) \
__check_entry_kprobe_x86; \
__set_entry_x86; \
.globl name; \
__ALIGN; \
name:
#define END_X86(name) \
__unset_entry_x86; \
__check_entry_kprobe_x86; \
.size name, .-name
#define KPROBE_ENTRY_X86(name) \
__check_entry_kprobe_x86; \
__set_kprobe_x86; \
.pushsection .kprobes.text, "ax"; \
.globl name; \
__ALIGN; \
name:
#define KPROBE_END_X86(name) \
__unset_kprobe_x86; \
__check_entry_kprobe_x86; \
.size name, .-name; \
.popsection
#endif /* _ASM_X86_LINKAGE_H */ #endif /* _ASM_X86_LINKAGE_H */
...@@ -33,12 +33,10 @@ ...@@ -33,12 +33,10 @@
/* 44=32+12, the limit we can fit into an unsigned long pfn */ /* 44=32+12, the limit we can fit into an unsigned long pfn */
#define __PHYSICAL_MASK_SHIFT 44 #define __PHYSICAL_MASK_SHIFT 44
#define __VIRTUAL_MASK_SHIFT 32 #define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 3
#else /* !CONFIG_X86_PAE */ #else /* !CONFIG_X86_PAE */
#define __PHYSICAL_MASK_SHIFT 32 #define __PHYSICAL_MASK_SHIFT 32
#define __VIRTUAL_MASK_SHIFT 32 #define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 2
#endif /* CONFIG_X86_PAE */ #endif /* CONFIG_X86_PAE */
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
......
#ifndef _ASM_X86_PAGE_64_DEFS_H #ifndef _ASM_X86_PAGE_64_DEFS_H
#define _ASM_X86_PAGE_64_DEFS_H #define _ASM_X86_PAGE_64_DEFS_H
#define PAGETABLE_LEVELS 4
#define THREAD_ORDER 1 #define THREAD_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define CURRENT_MASK (~(THREAD_SIZE - 1)) #define CURRENT_MASK (~(THREAD_SIZE - 1))
......
...@@ -16,12 +16,6 @@ ...@@ -16,12 +16,6 @@
(ie, 32-bit PAE). */ (ie, 32-bit PAE). */
#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) #define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) #define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) #define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
......
...@@ -17,6 +17,7 @@ typedef union { ...@@ -17,6 +17,7 @@ typedef union {
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#define SHARED_KERNEL_PMD 0 #define SHARED_KERNEL_PMD 0
#define PAGETABLE_LEVELS 2
/* /*
* traditional i386 two-level paging structure: * traditional i386 two-level paging structure:
...@@ -25,6 +26,7 @@ typedef union { ...@@ -25,6 +26,7 @@ typedef union {
#define PGDIR_SHIFT 22 #define PGDIR_SHIFT 22
#define PTRS_PER_PGD 1024 #define PTRS_PER_PGD 1024
/* /*
* the i386 is two-level, so we don't really have any * the i386 is two-level, so we don't really have any
* PMD directory physically. * PMD directory physically.
......
...@@ -24,6 +24,8 @@ typedef union { ...@@ -24,6 +24,8 @@ typedef union {
#define SHARED_KERNEL_PMD 1 #define SHARED_KERNEL_PMD 1
#endif #endif
#define PAGETABLE_LEVELS 3
/* /*
* PGDIR_SHIFT determines what a top-level page table entry can map * PGDIR_SHIFT determines what a top-level page table entry can map
*/ */
......
...@@ -18,6 +18,7 @@ typedef struct { pteval_t pte; } pte_t; ...@@ -18,6 +18,7 @@ typedef struct { pteval_t pte; } pte_t;
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#define SHARED_KERNEL_PMD 0 #define SHARED_KERNEL_PMD 0
#define PAGETABLE_LEVELS 4
/* /*
* PGDIR_SHIFT determines what a top-level page table entry can map * PGDIR_SHIFT determines what a top-level page table entry can map
......
...@@ -173,6 +173,12 @@ ...@@ -173,6 +173,12 @@
#include <linux/types.h> #include <linux/types.h>
/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t; typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef struct { pgdval_t pgd; } pgd_t; typedef struct { pgdval_t pgd; } pgd_t;
......
...@@ -403,7 +403,6 @@ DECLARE_PER_CPU(unsigned long, stack_canary); ...@@ -403,7 +403,6 @@ DECLARE_PER_CPU(unsigned long, stack_canary);
#endif #endif
#endif /* X86_64 */ #endif /* X86_64 */
extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int xstate_size; extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *); extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep; extern struct kmem_cache *task_xstate_cachep;
......
...@@ -82,7 +82,7 @@ asmlinkage long sys_iopl(unsigned int, struct pt_regs *); ...@@ -82,7 +82,7 @@ asmlinkage long sys_iopl(unsigned int, struct pt_regs *);
/* kernel/signal_64.c */ /* kernel/signal_64.c */
asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *, asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,
struct pt_regs *); struct pt_regs *);
asmlinkage long sys_rt_sigreturn(struct pt_regs *); long sys_rt_sigreturn(struct pt_regs *);
/* kernel/sys_x86_64.c */ /* kernel/sys_x86_64.c */
asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long, asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long,
......
...@@ -3,8 +3,8 @@ ...@@ -3,8 +3,8 @@
*/ */
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/msr-index.h> #include <asm/msr-index.h>
#include <asm/page.h> #include <asm/page_types.h>
#include <asm/pgtable.h> #include <asm/pgtable_types.h>
#include <asm/processor-flags.h> #include <asm/processor-flags.h>
.code16 .code16
......
.section .text.page_aligned .section .text.page_aligned
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/page.h> #include <asm/page_types.h>
# Copyright 2003, 2008 Pavel Machek <pavel@suse.cz>, distribute under GPLv2 # Copyright 2003, 2008 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
......
.text .text
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/pgtable.h> #include <asm/pgtable_types.h>
#include <asm/page.h> #include <asm/page_types.h>
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
......
...@@ -204,12 +204,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy) ...@@ -204,12 +204,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
} }
/* Enable Enhanced PowerSaver */ /* Enable Enhanced PowerSaver */
rdmsrl(MSR_IA32_MISC_ENABLE, val); rdmsrl(MSR_IA32_MISC_ENABLE, val);
if (!(val & 1 << 16)) { if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
val |= 1 << 16; val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
wrmsrl(MSR_IA32_MISC_ENABLE, val); wrmsrl(MSR_IA32_MISC_ENABLE, val);
/* Can be locked at 0 */ /* Can be locked at 0 */
rdmsrl(MSR_IA32_MISC_ENABLE, val); rdmsrl(MSR_IA32_MISC_ENABLE, val);
if (!(val & 1 << 16)) { if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n"); printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n");
return -ENODEV; return -ENODEV;
} }
......
...@@ -390,14 +390,14 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) ...@@ -390,14 +390,14 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
enable it if not. */ enable it if not. */
rdmsr(MSR_IA32_MISC_ENABLE, l, h); rdmsr(MSR_IA32_MISC_ENABLE, l, h);
if (!(l & (1<<16))) { if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
l |= (1<<16); l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
dprintk("trying to enable Enhanced SpeedStep (%x)\n", l); dprintk("trying to enable Enhanced SpeedStep (%x)\n", l);
wrmsr(MSR_IA32_MISC_ENABLE, l, h); wrmsr(MSR_IA32_MISC_ENABLE, l, h);
/* check to see if it stuck */ /* check to see if it stuck */
rdmsr(MSR_IA32_MISC_ENABLE, l, h); rdmsr(MSR_IA32_MISC_ENABLE, l, h);
if (!(l & (1<<16))) { if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
printk(KERN_INFO PFX printk(KERN_INFO PFX
"couldn't enable Enhanced SpeedStep\n"); "couldn't enable Enhanced SpeedStep\n");
return -ENODEV; return -ENODEV;
......
...@@ -146,10 +146,10 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) ...@@ -146,10 +146,10 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
*/ */
if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
if ((lo & (1<<9)) == 0) { if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
lo |= (1<<9); /* Disable hw prefetching */ lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
} }
} }
......
...@@ -49,13 +49,13 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c) ...@@ -49,13 +49,13 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
*/ */
rdmsr(MSR_IA32_MISC_ENABLE, l, h); rdmsr(MSR_IA32_MISC_ENABLE, l, h);
h = apic_read(APIC_LVTTHMR); h = apic_read(APIC_LVTTHMR);
if ((l & (1 << 3)) && (h & APIC_DM_SMI)) { if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
printk(KERN_DEBUG printk(KERN_DEBUG
"CPU%d: Thermal monitoring handled by SMI\n", cpu); "CPU%d: Thermal monitoring handled by SMI\n", cpu);
return; return;
} }
if (cpu_has(c, X86_FEATURE_TM2) && (l & (1 << 13))) if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2))
tm2 = 1; tm2 = 1;
if (h & APIC_VECTOR_MASK) { if (h & APIC_VECTOR_MASK) {
...@@ -73,7 +73,7 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c) ...@@ -73,7 +73,7 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h); wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h);
rdmsr(MSR_IA32_MISC_ENABLE, l, h); rdmsr(MSR_IA32_MISC_ENABLE, l, h);
wrmsr(MSR_IA32_MISC_ENABLE, l | (1 << 3), h); wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
l = apic_read(APIC_LVTTHMR); l = apic_read(APIC_LVTTHMR);
apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
......
...@@ -85,7 +85,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c) ...@@ -85,7 +85,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
*/ */
rdmsr(MSR_IA32_MISC_ENABLE, l, h); rdmsr(MSR_IA32_MISC_ENABLE, l, h);
h = apic_read(APIC_LVTTHMR); h = apic_read(APIC_LVTTHMR);
if ((l & (1<<3)) && (h & APIC_DM_SMI)) { if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n", printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n",
cpu); cpu);
return; /* -EBUSY */ return; /* -EBUSY */
...@@ -111,7 +111,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c) ...@@ -111,7 +111,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
vendor_thermal_interrupt = intel_thermal_interrupt; vendor_thermal_interrupt = intel_thermal_interrupt;
rdmsr(MSR_IA32_MISC_ENABLE, l, h); rdmsr(MSR_IA32_MISC_ENABLE, l, h);
wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h); wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
l = apic_read(APIC_LVTTHMR); l = apic_read(APIC_LVTTHMR);
apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/page.h> #include <asm/page_types.h>
/* /*
* efi_call_phys(void *, ...) is a function with variable parameters. * efi_call_phys(void *, ...) is a function with variable parameters.
......
...@@ -47,7 +47,7 @@ ...@@ -47,7 +47,7 @@
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/page.h> #include <asm/page_types.h>
#include <asm/desc.h> #include <asm/desc.h>
#include <asm/percpu.h> #include <asm/percpu.h>
#include <asm/dwarf2.h> #include <asm/dwarf2.h>
......
...@@ -48,7 +48,7 @@ ...@@ -48,7 +48,7 @@
#include <asm/unistd.h> #include <asm/unistd.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/hw_irq.h> #include <asm/hw_irq.h>
#include <asm/page.h> #include <asm/page_types.h>
#include <asm/irqflags.h> #include <asm/irqflags.h>
#include <asm/paravirt.h> #include <asm/paravirt.h>
#include <asm/ftrace.h> #include <asm/ftrace.h>
......
...@@ -11,8 +11,8 @@ ...@@ -11,8 +11,8 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/page.h> #include <asm/page_types.h>
#include <asm/pgtable.h> #include <asm/pgtable_types.h>
#include <asm/desc.h> #include <asm/desc.h>
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/page.h> #include <asm/page_types.h>
#include <asm/kexec.h> #include <asm/kexec.h>
#include <asm/processor-flags.h> #include <asm/processor-flags.h>
......
...@@ -7,10 +7,10 @@ ...@@ -7,10 +7,10 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/page.h> #include <asm/page_types.h>
#include <asm/kexec.h> #include <asm/kexec.h>
#include <asm/processor-flags.h> #include <asm/processor-flags.h>
#include <asm/pgtable.h> #include <asm/pgtable_types.h>
/* /*
* Must be relocatable PIC code callable as a C function * Must be relocatable PIC code callable as a C function
......
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/page.h> #include <asm/page_types.h>
/* We can free up trampoline after bootup if cpu hotplug is not supported. */ /* We can free up trampoline after bootup if cpu hotplug is not supported. */
#ifndef CONFIG_HOTPLUG_CPU #ifndef CONFIG_HOTPLUG_CPU
......
...@@ -25,8 +25,8 @@ ...@@ -25,8 +25,8 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/pgtable.h> #include <asm/pgtable_types.h>
#include <asm/page.h> #include <asm/page_types.h>
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/processor-flags.h> #include <asm/processor-flags.h>
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
#include <asm-generic/vmlinux.lds.h> #include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/page.h> #include <asm/page_types.h>
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/boot.h> #include <asm/boot.h>
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
#include <asm-generic/vmlinux.lds.h> #include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/page.h> #include <asm/page_types.h>
#undef i386 /* in case the preprocessor is a 32bit one */ #undef i386 /* in case the preprocessor is a 32bit one */
......
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h> #include <asm/dwarf2.h>
#include <asm/page.h> #include <asm/page_types.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
......
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/page.h> #include <asm/page_types.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/processor-flags.h> #include <asm/processor-flags.h>
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
.text .text
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/page.h> #include <asm/page_types.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/processor-flags.h> #include <asm/processor-flags.h>
......
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
#include <asm/boot.h> #include <asm/boot.h>
#include <asm/asm.h> #include <asm/asm.h>
#include <asm/page.h> #include <asm/page_types.h>
#include <xen/interface/elfnote.h> #include <xen/interface/elfnote.h>
#include <asm/xen/interface.h> #include <asm/xen/interface.h>
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment