Commit 0b6de009 authored by Ingo Molnar

Merge branch 'x86/apic' into perfcounters/core

Conflicts:
	arch/x86/kernel/cpu/perfctr-watchdog.c
parents 37a25424 f62bae50
......@@ -137,7 +137,7 @@ static void cn_test_timer_func(unsigned long __data)
memcpy(m + 1, data, m->len);
cn_netlink_send(m, 0, gfp_any());
cn_netlink_send(m, 0, GFP_ATOMIC);
kfree(m);
}
......@@ -160,10 +160,8 @@ static int cn_test_init(void)
goto err_out;
}
init_timer(&cn_test_timer);
cn_test_timer.function = cn_test_timer_func;
setup_timer(&cn_test_timer, cn_test_timer_func, 0);
cn_test_timer.expires = jiffies + HZ;
cn_test_timer.data = 0;
add_timer(&cn_test_timer);
return 0;
......
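For reference, the timer hunk above is a mechanical conversion to setup_timer(); in before/after form (names taken from the hunk itself, a sketch rather than a standalone program):

/* before: open-coded initialization, as removed above */
init_timer(&cn_test_timer);
cn_test_timer.function = cn_test_timer_func;
cn_test_timer.data = 0;

/* after: setup_timer() installs the callback and data in one call */
setup_timer(&cn_test_timer, cn_test_timer_func, 0);

/* arming the timer is unchanged */
cn_test_timer.expires = jiffies + HZ;
add_timer(&cn_test_timer);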
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 29
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Erotic Pickled Herring
# *DOCUMENTATION*
......
#ifndef _ALPHA_STATFS_H
#define _ALPHA_STATFS_H
#include <linux/types.h>
/* Alpha is the only 64-bit platform with 32-bit statfs. And doesn't
even seem to implement statfs64 */
#define __statfs_word __u32
......
#ifndef _ALPHA_SWAB_H
#define _ALPHA_SWAB_H
#include <asm/types.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/compiler.h>
......
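A pattern that repeats in the header hunks above and below is replacing #include <asm/types.h> with #include <linux/types.h> in headers visible to userspace, presumably so the fixed-width __u8/__u16/__u32/__u64 types come from the one header userspace is expected to include. A minimal sketch with a hypothetical exported header (illustration only, not part of this commit):

/* example_abi.h -- hypothetical exported header */
#ifndef _EXAMPLE_ABI_H
#define _EXAMPLE_ABI_H

#include <linux/types.h>	/* pulls in asm/types.h and the __uNN types for kernel and userspace */

struct example_record {
	__u32 flags;		/* fixed-width members keep the ABI stable across 32/64-bit userspace */
	__u64 buffer_ptr;
};

#endif /* _EXAMPLE_ABI_H */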
......@@ -2,7 +2,7 @@
#define __ARM_A_OUT_H__
#include <linux/personality.h>
#include <asm/types.h>
#include <linux/types.h>
struct exec
{
......
......@@ -14,7 +14,7 @@
#ifndef __ASMARM_SETUP_H
#define __ASMARM_SETUP_H
#include <asm/types.h>
#include <linux/types.h>
#define COMMAND_LINE_SIZE 1024
......
......@@ -16,7 +16,7 @@
#define __ASM_ARM_SWAB_H
#include <linux/compiler.h>
#include <asm/types.h>
#include <linux/types.h>
#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
# define __SWAB_64_THRU_32__
......
......@@ -4,7 +4,7 @@
#ifndef __ASM_AVR32_SWAB_H
#define __ASM_AVR32_SWAB_H
#include <asm/types.h>
#include <linux/types.h>
#include <linux/compiler.h>
#define __SWAB_64_THRU_32__
......
#ifndef _BLACKFIN_SWAB_H
#define _BLACKFIN_SWAB_H
#include <asm/types.h>
#include <linux/types.h>
#include <linux/compiler.h>
#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
......
#ifndef _H8300_SWAB_H
#define _H8300_SWAB_H
#include <asm/types.h>
#include <linux/types.h>
#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
# define __SWAB_64_THRU_32__
......
......@@ -6,8 +6,6 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <asm/types.h>
/* floating point status register: */
#define FPSR_TRAP_VD (1 << 0) /* invalid op trap disabled */
#define FPSR_TRAP_DD (1 << 1) /* denormal trap disabled */
......
......@@ -6,6 +6,7 @@
* Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
*/
#include <linux/types.h>
#include <linux/compiler.h>
/* define this macro to get some asm stmts included in 'c' files */
......
......@@ -10,6 +10,7 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
/* include compiler specific intrinsics */
#include <asm/ia64regs.h>
#ifdef __INTEL_COMPILER
......
......@@ -21,8 +21,7 @@
*
*/
#include <asm/types.h>
#include <linux/types.h>
#include <linux/ioctl.h>
/* Architectural interrupt line count. */
......
......@@ -6,7 +6,7 @@
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
*/
#include <asm/types.h>
#include <linux/types.h>
#include <asm/intrinsics.h>
#include <linux/compiler.h>
......
......@@ -199,7 +199,7 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
return __va(phys_addr);
}
char *__init __acpi_unmap_table(unsigned long virt_addr, unsigned long size)
void __init __acpi_unmap_table(char *map, unsigned long size)
{
}
......
......@@ -9,6 +9,7 @@
#ifndef _ASM_SIGCONTEXT_H
#define _ASM_SIGCONTEXT_H
#include <linux/types.h>
#include <asm/sgidefs.h>
#if _MIPS_SIM == _MIPS_SIM_ABI32
......
......@@ -9,7 +9,7 @@
#define _ASM_SWAB_H
#include <linux/compiler.h>
#include <asm/types.h>
#include <linux/types.h>
#define __SWAB_64_THRU_32__
......
......@@ -336,10 +336,11 @@
#define NUM_PDC_RESULT 32
#if !defined(__ASSEMBLY__)
#ifdef __KERNEL__
#include <linux/types.h>
#ifdef __KERNEL__
extern int pdc_type;
/* Values for pdc_type */
......
#ifndef _PARISC_SWAB_H
#define _PARISC_SWAB_H
#include <asm/types.h>
#include <linux/types.h>
#include <linux/compiler.h>
#define __SWAB_64_THRU_32__
......
......@@ -9,7 +9,7 @@
#ifndef __ASM_BOOTX_H__
#define __ASM_BOOTX_H__
#include <asm/types.h>
#include <linux/types.h>
#ifdef macintosh
#include <Types.h>
......
......@@ -7,7 +7,7 @@
#include <asm/string.h>
#endif
#include <asm/types.h>
#include <linux/types.h>
#include <asm/ptrace.h>
#include <asm/cputable.h>
#include <asm/auxvec.h>
......
......@@ -20,7 +20,7 @@
#ifndef __LINUX_KVM_POWERPC_H
#define __LINUX_KVM_POWERPC_H
#include <asm/types.h>
#include <linux/types.h>
struct kvm_regs {
__u64 pc;
......
......@@ -19,6 +19,7 @@
#ifndef _ASM_POWERPC_PS3FB_H_
#define _ASM_POWERPC_PS3FB_H_
#include <linux/types.h>
#include <linux/ioctl.h>
/* ioctl */
......
......@@ -23,9 +23,10 @@
#ifndef _SPU_INFO_H
#define _SPU_INFO_H
#include <linux/types.h>
#ifdef __KERNEL__
#include <asm/spu.h>
#include <linux/types.h>
#else
struct mfc_cq_sr {
__u64 mfc_cq_data0_RW;
......
......@@ -8,7 +8,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <asm/types.h>
#include <linux/types.h>
#include <linux/compiler.h>
#ifdef __GNUC__
......
......@@ -235,6 +235,20 @@ config SMP
If you don't know what to do here, say N.
config X86_X2APIC
bool "Support x2apic"
depends on X86_LOCAL_APIC && X86_64
---help---
This enables x2apic support on CPUs that have this feature.
This allows 32-bit apic IDs (so it can support very large systems),
and accesses the local apic via MSRs not via mmio.
( On certain CPU models you may need to enable INTR_REMAP too,
to get functional x2apic mode. )
If you don't know what to do here, say N.
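To make the help text's "via MSRs not via mmio" concrete: in xAPIC mode the registers are memory-mapped, while in x2apic mode the same registers become MSRs at APIC_BASE_MSR plus the mmio offset shifted right by 4, which is the addressing the x2apic_icr_read()/x2apic_icr_write() hunks later in this diff use. A rough sketch of the read path only, not a drop-in implementation:

/* xAPIC: memory-mapped register read, as in native_apic_mem_read() */
static u32 example_xapic_read(u32 reg)
{
	return *((volatile u32 *)(APIC_BASE + reg));
}

/* x2APIC: the same register read becomes an MSR access */
static u32 example_x2apic_read(u32 reg)
{
	u32 low, high;

	rdmsr(APIC_BASE_MSR + (reg >> 4), low, high);
	return low;
}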
config SPARSE_IRQ
bool "Support sparse irq numbering"
depends on PCI_MSI || HT_IRQ
......@@ -302,6 +316,7 @@ config X86_UV
bool "SGI Ultraviolet"
depends on X86_64
depends on X86_EXTENDED_PLATFORM
select X86_X2APIC
---help---
This option is needed in order to support SGI Ultraviolet systems.
If you don't have one of these, you should say N here.
......@@ -1829,6 +1844,7 @@ config DMAR_FLOPPY_WA
config INTR_REMAP
bool "Support for Interrupt Remapping (EXPERIMENTAL)"
depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
select X86_X2APIC
---help---
Supports Interrupt remapping for IO-APIC and MSI devices.
To use x2apic mode in the CPU's which support x2APIC enhancements or
......
#ifndef _ASM_X86_GENAPIC_H
#define _ASM_X86_GENAPIC_H
#include <linux/cpumask.h>
#include <asm/mpspec.h>
#include <asm/atomic.h>
/*
* Copyright 2004 James Cleverdon, IBM.
* Subject to the GNU Public License, v.2
*
* Generic APIC sub-arch data struct.
*
* Hacked for x86-64 by James Cleverdon from i386 architecture code by
* Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
* James Cleverdon.
*/
struct genapic {
char *name;
int (*probe)(void);
int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
int (*apic_id_registered)(void);
u32 irq_delivery_mode;
u32 irq_dest_mode;
const struct cpumask *(*target_cpus)(void);
int disable_esr;
int dest_logical;
unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
unsigned long (*check_apicid_present)(int apicid);
void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
void (*init_apic_ldr)(void);
physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
void (*setup_apic_routing)(void);
int (*multi_timer_check)(int apic, int irq);
int (*apicid_to_node)(int logical_apicid);
int (*cpu_to_logical_apicid)(int cpu);
int (*cpu_present_to_apicid)(int mps_cpu);
physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
void (*setup_portio_remap)(void);
int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
void (*enable_apic_mode)(void);
int (*phys_pkg_id)(int cpuid_apic, int index_msb);
/*
* When one of the next two hooks returns 1 the genapic
* is switched to this. Essentially they are additional
* probe functions:
*/
int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid);
unsigned int (*get_apic_id)(unsigned long x);
unsigned long (*set_apic_id)(unsigned int id);
unsigned long apic_id_mask;
unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
const struct cpumask *andmask);
/* ipi */
void (*send_IPI_mask)(const struct cpumask *mask, int vector);
void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
int vector);
void (*send_IPI_allbutself)(int vector);
void (*send_IPI_all)(int vector);
void (*send_IPI_self)(int vector);
/* wakeup_secondary_cpu */
int (*wakeup_cpu)(int apicid, unsigned long start_eip);
int trampoline_phys_low;
int trampoline_phys_high;
void (*wait_for_init_deassert)(atomic_t *deassert);
void (*smp_callin_clear_local_apic)(void);
void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
void (*inquire_remote_apic)(int apicid);
};
extern struct genapic *apic;
/*
* Warm reset vector default position:
*/
#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467
#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469
#ifdef CONFIG_X86_32
extern void es7000_update_genapic_to_cluster(void);
#else
extern struct genapic apic_flat;
extern struct genapic apic_physflat;
extern struct genapic apic_x2apic_cluster;
extern struct genapic apic_x2apic_phys;
extern int default_acpi_madt_oem_check(char *, char *);
extern void apic_send_IPI_self(int vector);
extern struct genapic apic_x2apic_uv_x;
DECLARE_PER_CPU(int, x2apic_extra_bits);
extern void default_setup_apic_routing(void);
extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
#endif
static inline void default_wait_for_init_deassert(atomic_t *deassert)
{
while (!atomic_read(deassert))
cpu_relax();
return;
}
extern void generic_bigsmp_probe(void);
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/smp.h>
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
static inline const struct cpumask *default_target_cpus(void)
{
#ifdef CONFIG_SMP
return cpu_online_mask;
#else
return cpumask_of(0);
#endif
}
DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
static inline unsigned int read_apic_id(void)
{
unsigned int reg;
reg = apic_read(APIC_ID);
return apic->get_apic_id(reg);
}
#ifdef CONFIG_X86_64
extern void default_setup_apic_routing(void);
#else
/*
* Set up the logical destination ID.
*
* Intel recommends to set DFR, LDR and TPR before enabling
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
* document number 292116). So here it goes...
*/
extern void default_init_apic_ldr(void);
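The ordering the comment asks for shows up later in this diff in bigsmp_init_apic_ldr()/calculate_ldr(); as a sketch, the sequence is:

/* program the destination format first, then this CPU's logical ID */
apic_write(APIC_DFR, APIC_DFR_FLAT);
val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
val |= SET_APIC_LOGICAL_ID(id);
apic_write(APIC_LDR, val);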
static inline int default_apic_id_registered(void)
{
return physid_isset(read_apic_id(), phys_cpu_present_map);
}
static inline unsigned int
default_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
return cpumask_bits(cpumask)[0];
}
static inline unsigned int
default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask)
{
unsigned long mask1 = cpumask_bits(cpumask)[0];
unsigned long mask2 = cpumask_bits(andmask)[0];
unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
return (unsigned int)(mask1 & mask2 & mask3);
}
static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
{
return cpuid_apic >> index_msb;
}
static inline void default_setup_apic_routing(void)
{
#ifdef CONFIG_X86_IO_APIC
printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
"Flat", nr_ioapics);
#endif
}
extern int default_apicid_to_node(int logical_apicid);
#endif
static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
{
return physid_isset(apicid, bitmap);
}
static inline unsigned long default_check_apicid_present(int bit)
{
return physid_isset(bit, phys_cpu_present_map);
}
static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
{
return phys_map;
}
/* Mapping from cpu number to logical apicid */
static inline int default_cpu_to_logical_apicid(int cpu)
{
return 1 << cpu;
}
static inline int __default_cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
else
return BAD_APICID;
}
static inline int
__default_check_phys_apicid_present(int boot_cpu_physical_apicid)
{
return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
}
#ifdef CONFIG_X86_32
static inline int default_cpu_present_to_apicid(int mps_cpu)
{
return __default_cpu_present_to_apicid(mps_cpu);
}
static inline int
default_check_phys_apicid_present(int boot_cpu_physical_apicid)
{
return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
}
#else
extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
#endif
static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
{
return physid_mask_of_physid(phys_apicid);
}
#endif /* CONFIG_X86_LOCAL_APIC */
#endif /* _ASM_X86_GENAPIC_64_H */
#include <asm/apic.h>
......@@ -125,7 +125,7 @@ static inline void *phys_to_virt(phys_addr_t address)
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
#define isa_virt_to_bus virt_to_phys
#define isa_virt_to_bus (unsigned long)virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt
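The added (unsigned long) cast in isa_virt_to_bus above narrows the result of virt_to_phys(), presumably because that helper now returns phys_addr_t (64-bit with PAE, as the phys_to_virt() prototype in this hunk suggests) while ISA bus addresses fit comfortably in an unsigned long. A standalone illustration of the narrowing, with typedefs standing in for the 32-bit PAE case:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;	/* virt_to_phys() return type with 32-bit PAE */
typedef uint32_t kernel_ulong;	/* stands in for unsigned long on a 32-bit kernel */

int main(void)
{
	phys_addr_t isa  = 0x000d0000ULL;	/* ISA-range address: unchanged by the cast */
	phys_addr_t high = 0x100000000ULL;	/* above 4 GiB: would be truncated, but ISA devices never see such addresses */

	printf("%#x\n", (kernel_ulong)isa);	/* prints 0xd0000 */
	printf("%#x\n", (kernel_ulong)high);	/* prints 0 */
	return 0;
}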
......
......@@ -123,8 +123,6 @@ extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
int vector);
extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
int vector);
#include <asm/genapic.h>
extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
int vector);
extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
......
......@@ -167,6 +167,4 @@ extern int generic_mps_oem_check(struct mpc_table *, char *, char *);
extern int default_acpi_madt_oem_check(char *, char *);
extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
#endif /* _ASM_X86_MPSPEC_H */
#ifndef _ASM_X86_PAGE_H
#define _ASM_X86_PAGE_H
#include <linux/const.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#include <linux/types.h>
#ifdef __KERNEL__
#define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
/* Cast PAGE_MASK to a signed type so that it is sign-extended if
virtual addresses are 32-bits but physical addresses are larger
(ie, 32-bit PAE). */
#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
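A standalone illustration of the sign-extension described in the comment above, using a 32-bit PAGE_MASK and the 44-bit __PHYSICAL_MASK of the PAE case (assumed values, for demonstration only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t page_mask = ~0xfffU;			/* PAGE_MASK on 32-bit: 0xfffff000 */
	uint64_t phys_mask = (1ULL << 44) - 1;		/* __PHYSICAL_MASK with __PHYSICAL_MASK_SHIFT == 44 */

	/* unsigned widening: the physical bits above 4 GiB are lost */
	printf("%#llx\n", (unsigned long long)((uint64_t)page_mask & phys_mask));	/* 0xfffff000 */

	/* signed widening, as the kernel macro does: the mask sign-extends first */
	printf("%#llx\n", (unsigned long long)((int32_t)page_mask & phys_mask));	/* 0xffffffff000 */
	return 0;
}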
/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE 2
#ifndef __ASSEMBLY__
#include <linux/types.h>
#endif
#include <asm/page_types.h>
#ifdef CONFIG_X86_64
#include <asm/page_64.h>
......@@ -44,38 +13,18 @@
#include <asm/page_32.h>
#endif /* CONFIG_X86_64 */
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define VM_DATA_DEFAULT_FLAGS \
(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#ifndef __ASSEMBLY__
typedef struct { pgdval_t pgd; } pgd_t;
typedef struct { pgprotval_t pgprot; } pgprot_t;
extern int page_is_ram(unsigned long pagenr);
extern int devmem_is_allowed(unsigned long pagenr);
extern void map_devmem(unsigned long pfn, unsigned long size,
pgprot_t vma_prot);
extern void unmap_devmem(unsigned long pfn, unsigned long size,
pgprot_t vma_prot);
extern unsigned long max_low_pfn_mapped;
extern unsigned long max_pfn_mapped;
struct page;
static inline void clear_user_page(void *page, unsigned long vaddr,
struct page *pg)
struct page *pg)
{
clear_page(page);
}
static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
struct page *topage)
struct page *topage)
{
copy_page(to, from);
}
......@@ -84,114 +33,6 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
static inline pgd_t native_make_pgd(pgdval_t val)
{
return (pgd_t) { val };
}
static inline pgdval_t native_pgd_val(pgd_t pgd)
{
return pgd.pgd;
}
static inline pgdval_t pgd_flags(pgd_t pgd)
{
return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}
#if PAGETABLE_LEVELS >= 3
#if PAGETABLE_LEVELS == 4
typedef struct { pudval_t pud; } pud_t;
static inline pud_t native_make_pud(pmdval_t val)
{
return (pud_t) { val };
}
static inline pudval_t native_pud_val(pud_t pud)
{
return pud.pud;
}
#else /* PAGETABLE_LEVELS == 3 */
#include <asm-generic/pgtable-nopud.h>
static inline pudval_t native_pud_val(pud_t pud)
{
return native_pgd_val(pud.pgd);
}
#endif /* PAGETABLE_LEVELS == 4 */
static inline pudval_t pud_flags(pud_t pud)
{
return native_pud_val(pud) & PTE_FLAGS_MASK;
}
typedef struct { pmdval_t pmd; } pmd_t;
static inline pmd_t native_make_pmd(pmdval_t val)
{
return (pmd_t) { val };
}
static inline pmdval_t native_pmd_val(pmd_t pmd)
{
return pmd.pmd;
}
#else /* PAGETABLE_LEVELS == 2 */
#include <asm-generic/pgtable-nopmd.h>
static inline pmdval_t native_pmd_val(pmd_t pmd)
{
return native_pgd_val(pmd.pud.pgd);
}
#endif /* PAGETABLE_LEVELS >= 3 */
static inline pmdval_t pmd_flags(pmd_t pmd)
{
return native_pmd_val(pmd) & PTE_FLAGS_MASK;
}
static inline pte_t native_make_pte(pteval_t val)
{
return (pte_t) { .pte = val };
}
static inline pteval_t native_pte_val(pte_t pte)
{
return pte.pte;
}
static inline pteval_t pte_flags(pte_t pte)
{
return native_pte_val(pte) & PTE_FLAGS_MASK;
}
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) } )
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT */
#define pgd_val(x) native_pgd_val(x)
#define __pgd(x) native_make_pgd(x)
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x) native_pud_val(x)
#define __pud(x) native_make_pud(x)
#endif
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x) native_pmd_val(x)
#define __pmd(x) native_make_pmd(x)
#endif
#define pte_val(x) native_pte_val(x)
#define __pte(x) native_make_pte(x)
#endif /* CONFIG_PARAVIRT */
#define __pa(x) __phys_addr((unsigned long)(x))
#define __pa_nodebug(x) __phys_addr_nodebug((unsigned long)(x))
/* __pa_symbol should be used for C visible symbols.
......
#ifndef _ASM_X86_PAGE_32_H
#define _ASM_X86_PAGE_32_H
/*
* This handles the memory map.
*
* A __PAGE_OFFSET of 0xC0000000 means that the kernel has
* a virtual address space of one gigabyte, which limits the
* amount of physical memory you can use to about 950MB.
*
* If you want more physical memory than this then see the CONFIG_HIGHMEM4G
* and CONFIG_HIGHMEM64G options in the kernel configuration.
*/
#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#ifdef CONFIG_4KSTACKS
#define THREAD_ORDER 0
#else
#define THREAD_ORDER 1
#endif
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define STACKFAULT_STACK 0
#define DOUBLEFAULT_STACK 1
#define NMI_STACK 0
#define DEBUG_STACK 0
#define MCE_STACK 0
#define N_EXCEPTION_STACKS 1
#ifdef CONFIG_X86_PAE
/* 44=32+12, the limit we can fit into an unsigned long pfn */
#define __PHYSICAL_MASK_SHIFT 44
#define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 3
#ifndef __ASSEMBLY__
typedef u64 pteval_t;
typedef u64 pmdval_t;
typedef u64 pudval_t;
typedef u64 pgdval_t;
typedef u64 pgprotval_t;
typedef union {
struct {
unsigned long pte_low, pte_high;
};
pteval_t pte;
} pte_t;
#endif /* __ASSEMBLY__ */
#else /* !CONFIG_X86_PAE */
#define __PHYSICAL_MASK_SHIFT 32
#define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 2
#ifndef __ASSEMBLY__
typedef unsigned long pteval_t;
typedef unsigned long pmdval_t;
typedef unsigned long pudval_t;
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
typedef union {
pteval_t pte;
pteval_t pte_low;
} pte_t;
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_X86_PAE */
#include <asm/page_32_types.h>
#ifndef __ASSEMBLY__
typedef struct page *pgtable_t;
#endif
#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif
#ifndef __ASSEMBLY__
#define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET)
#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
......@@ -89,23 +21,6 @@ extern unsigned long __phys_addr(unsigned long);
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif /* CONFIG_FLATMEM */
extern int nx_enabled;
/*
* This much address space is reserved for vmalloc() and iomap()
* as well as fixmap mappings.
*/
extern unsigned int __VMALLOC_RESERVE;
extern int sysctl_legacy_va_layout;
extern void find_low_pfn_range(void);
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
extern void initmem_init(unsigned long, unsigned long);
extern void free_initmem(void);
extern void setup_bootmem_allocator(void);
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>
......
#ifndef _ASM_X86_PAGE_32_DEFS_H
#define _ASM_X86_PAGE_32_DEFS_H
#include <linux/const.h>
/*
* This handles the memory map.
*
* A __PAGE_OFFSET of 0xC0000000 means that the kernel has
* a virtual address space of one gigabyte, which limits the
* amount of physical memory you can use to about 950MB.
*
* If you want more physical memory than this then see the CONFIG_HIGHMEM4G
* and CONFIG_HIGHMEM64G options in the kernel configuration.
*/
#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#ifdef CONFIG_4KSTACKS
#define THREAD_ORDER 0
#else
#define THREAD_ORDER 1
#endif
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define STACKFAULT_STACK 0
#define DOUBLEFAULT_STACK 1
#define NMI_STACK 0
#define DEBUG_STACK 0
#define MCE_STACK 0
#define N_EXCEPTION_STACKS 1
#ifdef CONFIG_X86_PAE
/* 44=32+12, the limit we can fit into an unsigned long pfn */
#define __PHYSICAL_MASK_SHIFT 44
#define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 3
#else /* !CONFIG_X86_PAE */
#define __PHYSICAL_MASK_SHIFT 32
#define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 2
#endif /* CONFIG_X86_PAE */
#ifndef __ASSEMBLY__
/*
* This much address space is reserved for vmalloc() and iomap()
* as well as fixmap mappings.
*/
extern unsigned int __VMALLOC_RESERVE;
extern int sysctl_legacy_va_layout;
extern void find_low_pfn_range(void);
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
extern void initmem_init(unsigned long, unsigned long);
extern void free_initmem(void);
extern void setup_bootmem_allocator(void);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PAGE_32_DEFS_H */
#ifndef _ASM_X86_PAGE_64_H
#define _ASM_X86_PAGE_64_H
#define PAGETABLE_LEVELS 4
#define THREAD_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define CURRENT_MASK (~(THREAD_SIZE - 1))
#define EXCEPTION_STACK_ORDER 0
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
#define IRQ_STACK_ORDER 2
#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
#define STACKFAULT_STACK 1
#define DOUBLEFAULT_STACK 2
#define NMI_STACK 3
#define DEBUG_STACK 4
#define MCE_STACK 5
#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
/*
* Set __PAGE_OFFSET to the most negative possible address +
* PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
* hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
* what Xen requires.
*/
#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
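The comment's numbers can be checked against the constants in this tree: PGDIR_SHIFT is 39, so one pgd slot covers 2^39 bytes (512 GiB), and the most negative canonical address is 0xffff800000000000 (pgd slot 256); 16 slots later lands exactly on the __PAGE_OFFSET above. A quick standalone check:

#include <stdio.h>

int main(void)
{
	unsigned long long pgdir_size = 1ULL << 39;		/* PGDIR_SHIFT == 39: 512 GiB per pgd slot */
	unsigned long long lowest     = 0xffff800000000000ULL;	/* most negative canonical address, pgd slot 256 */

	/* 16 slots * 512 GiB = 8 TiB hypervisor hole; 256 + 16 = slot 272, as the comment says */
	printf("%#llx\n", lowest + 16 * pgdir_size);		/* prints 0xffff880000000000 */
	return 0;
}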
#define __PHYSICAL_START CONFIG_PHYSICAL_START
#define __KERNEL_ALIGN 0x200000
/*
* Make sure kernel is aligned to 2MB address. Catching it at compile
* time is better. Change your config file and compile the kernel
* for a 2MB aligned address (CONFIG_PHYSICAL_START)
*/
#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
#endif
#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
/* See Documentation/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT 46
#define __VIRTUAL_MASK_SHIFT 48
/*
* Kernel image size is limited to 512 MB (see level2_kernel_pgt in
* arch/x86/kernel/head_64.S), and it is mapped here:
*/
#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
#ifndef __ASSEMBLY__
void clear_page(void *page);
void copy_page(void *to, void *from);
/* duplicated to the one in bootmem.h */
extern unsigned long max_pfn;
extern unsigned long phys_base;
extern unsigned long __phys_addr(unsigned long);
#define __phys_reloc_hide(x) (x)
/*
* These are used to make use of C type-checking..
*/
typedef unsigned long pteval_t;
typedef unsigned long pmdval_t;
typedef unsigned long pudval_t;
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
typedef struct page *pgtable_t;
typedef struct { pteval_t pte; } pte_t;
#define vmemmap ((struct page *)VMEMMAP_START)
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
extern void free_initmem(void);
extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) ((pfn) < max_pfn)
#endif
#include <asm/page_64_types.h>
#endif /* _ASM_X86_PAGE_64_H */
#ifndef _ASM_X86_PAGE_64_DEFS_H
#define _ASM_X86_PAGE_64_DEFS_H
#define PAGETABLE_LEVELS 4
#define THREAD_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define CURRENT_MASK (~(THREAD_SIZE - 1))
#define EXCEPTION_STACK_ORDER 0
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
#define IRQ_STACK_ORDER 2
#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
#define STACKFAULT_STACK 1
#define DOUBLEFAULT_STACK 2
#define NMI_STACK 3
#define DEBUG_STACK 4
#define MCE_STACK 5
#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
/*
* Set __PAGE_OFFSET to the most negative possible address +
* PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
* hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
* what Xen requires.
*/
#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
#define __PHYSICAL_START CONFIG_PHYSICAL_START
#define __KERNEL_ALIGN 0x200000
/*
* Make sure kernel is aligned to 2MB address. Catching it at compile
* time is better. Change your config file and compile the kernel
* for a 2MB aligned address (CONFIG_PHYSICAL_START)
*/
#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
#endif
#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
/* See Documentation/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT 46
#define __VIRTUAL_MASK_SHIFT 48
/*
* Kernel image size is limited to 512 MB (see level2_kernel_pgt in
* arch/x86/kernel/head_64.S), and it is mapped here:
*/
#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
#ifndef __ASSEMBLY__
void clear_page(void *page);
void copy_page(void *to, void *from);
/* duplicated to the one in bootmem.h */
extern unsigned long max_pfn;
extern unsigned long phys_base;
extern unsigned long __phys_addr(unsigned long);
#define __phys_reloc_hide(x) (x)
#define vmemmap ((struct page *)VMEMMAP_START)
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
extern void free_initmem(void);
extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) ((pfn) < max_pfn)
#endif
#endif /* _ASM_X86_PAGE_64_DEFS_H */
#ifndef _ASM_X86_PAGE_DEFS_H
#define _ASM_X86_PAGE_DEFS_H
#include <linux/const.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
/* Cast PAGE_MASK to a signed type so that it is sign-extended if
virtual addresses are 32-bits but physical addresses are larger
(ie, 32-bit PAE). */
#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE 2
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define VM_DATA_DEFAULT_FLAGS \
(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#ifdef CONFIG_X86_64
#include <asm/page_64_types.h>
#else
#include <asm/page_32_types.h>
#endif /* CONFIG_X86_64 */
#ifndef __ASSEMBLY__
struct pgprot;
extern int page_is_ram(unsigned long pagenr);
extern int devmem_is_allowed(unsigned long pagenr);
extern void map_devmem(unsigned long pfn, unsigned long size,
struct pgprot vma_prot);
extern void unmap_devmem(unsigned long pfn, unsigned long size,
struct pgprot vma_prot);
extern unsigned long max_low_pfn_mapped;
extern unsigned long max_pfn_mapped;
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PAGE_DEFS_H */
......@@ -4,7 +4,7 @@
* para-virtualization: those hooks are defined here. */
#ifdef CONFIG_PARAVIRT
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/asm.h>
/* Bitmask of what can be clobbered: usually at least eax. */
......
#ifndef _ASM_X86_PGTABLE_2LEVEL_DEFS_H
#define _ASM_X86_PGTABLE_2LEVEL_DEFS_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
typedef unsigned long pteval_t;
typedef unsigned long pmdval_t;
typedef unsigned long pudval_t;
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
typedef union {
pteval_t pte;
pteval_t pte_low;
} pte_t;
#endif /* !__ASSEMBLY__ */
#define SHARED_KERNEL_PMD 0
/*
......
#ifndef _ASM_X86_PGTABLE_3LEVEL_DEFS_H
#define _ASM_X86_PGTABLE_3LEVEL_DEFS_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
typedef u64 pteval_t;
typedef u64 pmdval_t;
typedef u64 pudval_t;
typedef u64 pgdval_t;
typedef u64 pgprotval_t;
typedef union {
struct {
unsigned long pte_low, pte_high;
};
pteval_t pte;
} pte_t;
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_PARAVIRT
#define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd)
#else
......@@ -25,4 +42,5 @@
*/
#define PTRS_PER_PTE 512
#endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */
#ifndef _ASM_X86_PGTABLE_32_H
#define _ASM_X86_PGTABLE_32_H
#include <asm/pgtable_32_types.h>
/*
* The Linux memory management assumes a three-level page table setup. On
......@@ -33,47 +34,6 @@ void paging_init(void);
extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
/*
* The Linux x86 paging architecture is 'compile-time dual-mode', it
* implements both the traditional 2-level x86 page tables and the
* newer 3-level PAE-mode page tables.
*/
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level-defs.h>
# define PMD_SIZE (1UL << PMD_SHIFT)
# define PMD_MASK (~(PMD_SIZE - 1))
#else
# include <asm/pgtable-2level-defs.h>
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
/* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 8MB value just means that there will be a 8MB "hole" after the
* physical memory until the kernel virtual memory starts. That means that
* any out-of-bounds memory accesses will hopefully be caught.
* The vmalloc() routines leaves a hole of 4kB between each vmalloced
* area for the same reason. ;)
*/
#define VMALLOC_OFFSET (8 * 1024 * 1024)
#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
#endif
#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
& PMD_MASK)
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
#else
# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
#endif
#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
/*
* Define this if things work differently on an i386 and an i486:
......
#ifndef _ASM_X86_PGTABLE_32_DEFS_H
#define _ASM_X86_PGTABLE_32_DEFS_H
/*
* The Linux x86 paging architecture is 'compile-time dual-mode', it
* implements both the traditional 2-level x86 page tables and the
* newer 3-level PAE-mode page tables.
*/
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level_types.h>
# define PMD_SIZE (1UL << PMD_SHIFT)
# define PMD_MASK (~(PMD_SIZE - 1))
#else
# include <asm/pgtable-2level_types.h>
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
/* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 8MB value just means that there will be a 8MB "hole" after the
* physical memory until the kernel virtual memory starts. That means that
* any out-of-bounds memory accesses will hopefully be caught.
* The vmalloc() routines leaves a hole of 4kB between each vmalloced
* area for the same reason. ;)
*/
#define VMALLOC_OFFSET (8 * 1024 * 1024)
#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
#endif
#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
& PMD_MASK)
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
#else
# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
#endif
#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
#endif /* _ASM_X86_PGTABLE_32_DEFS_H */
......@@ -2,6 +2,8 @@
#define _ASM_X86_PGTABLE_64_H
#include <linux/const.h>
#include <asm/pgtable_64_types.h>
#ifndef __ASSEMBLY__
/*
......@@ -25,32 +27,6 @@ extern void paging_init(void);
#endif /* !__ASSEMBLY__ */
#define SHARED_KERNEL_PMD 0
/*
* PGDIR_SHIFT determines what a top-level page table entry can map
*/
#define PGDIR_SHIFT 39
#define PTRS_PER_PGD 512
/*
* 3rd level page
*/
#define PUD_SHIFT 30
#define PTRS_PER_PUD 512
/*
* PMD_SHIFT determines the size of the area a middle-level
* page table can map
*/
#define PMD_SHIFT 21
#define PTRS_PER_PMD 512
/*
* entries per page directory level
*/
#define PTRS_PER_PTE 512
#ifndef __ASSEMBLY__
#define pte_ERROR(e) \
......@@ -130,26 +106,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
native_set_pgd(pgd, native_make_pgd(0));
}
#endif /* !__ASSEMBLY__ */
#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE - 1))
#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE - 1))
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
#define VMALLOC_START _AC(0xffffc20000000000, UL)
#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
#define VMEMMAP_START _AC(0xffffe20000000000, UL)
#define MODULES_VADDR _AC(0xffffffffa0000000, UL)
#define MODULES_END _AC(0xffffffffff000000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
#ifndef __ASSEMBLY__
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
......
#ifndef _ASM_X86_PGTABLE_64_DEFS_H
#define _ASM_X86_PGTABLE_64_DEFS_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
/*
* These are used to make use of C type-checking..
*/
typedef unsigned long pteval_t;
typedef unsigned long pmdval_t;
typedef unsigned long pudval_t;
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
typedef struct { pteval_t pte; } pte_t;
#endif /* !__ASSEMBLY__ */
#define SHARED_KERNEL_PMD 0
/*
* PGDIR_SHIFT determines what a top-level page table entry can map
*/
#define PGDIR_SHIFT 39
#define PTRS_PER_PGD 512
/*
* 3rd level page
*/
#define PUD_SHIFT 30
#define PTRS_PER_PUD 512
/*
* PMD_SHIFT determines the size of the area a middle-level
* page table can map
*/
#define PMD_SHIFT 21
#define PTRS_PER_PMD 512
/*
* entries per page directory level
*/
#define PTRS_PER_PTE 512
#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE - 1))
#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE - 1))
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
#define VMALLOC_START _AC(0xffffc20000000000, UL)
#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
#define VMEMMAP_START _AC(0xffffe20000000000, UL)
#define MODULES_VADDR _AC(0xffffffffa0000000, UL)
#define MODULES_END _AC(0xffffffffff000000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
#endif /* _ASM_X86_PGTABLE_64_DEFS_H */
......@@ -16,6 +16,7 @@ struct mm_struct;
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
......
......@@ -7,21 +7,6 @@
#ifndef __ASSEMBLY__
/* Interrupt control for vSMPowered x86_64 systems */
void vsmp_init(void);
void setup_bios_corruption_check(void);
#ifdef CONFIG_X86_VISWS
extern void visws_early_detect(void);
extern int is_visws_box(void);
#else
static inline void visws_early_detect(void) { }
static inline int is_visws_box(void) { return 0; }
#endif
extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip);
/*
* Any setup quirks to be performed?
*/
......@@ -45,15 +30,9 @@ struct x86_quirks {
void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable,
unsigned short oemsize);
int (*setup_ioapic_ids)(void);
int (*update_genapic)(void);
int (*update_apic)(void);
};
extern struct x86_quirks *x86_quirks;
extern unsigned long saved_video_mode;
#ifndef CONFIG_PARAVIRT
#define paravirt_post_allocator_init() do {} while (0)
#endif
#endif /* __ASSEMBLY__ */
#ifdef __i386__
......@@ -76,6 +55,28 @@ extern unsigned long saved_video_mode;
#ifndef __ASSEMBLY__
#include <asm/bootparam.h>
/* Interrupt control for vSMPowered x86_64 systems */
void vsmp_init(void);
void setup_bios_corruption_check(void);
#ifdef CONFIG_X86_VISWS
extern void visws_early_detect(void);
extern int is_visws_box(void);
#else
static inline void visws_early_detect(void) { }
static inline int is_visws_box(void) { return 0; }
#endif
extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip);
extern struct x86_quirks *x86_quirks;
extern unsigned long saved_video_mode;
#ifndef CONFIG_PARAVIRT
#define paravirt_post_allocator_init() do {} while (0)
#endif
#ifndef _SETUP
/*
......
......@@ -74,6 +74,7 @@ asmlinkage long sys_vfork(struct pt_regs *);
asmlinkage long sys_execve(char __user *, char __user * __user *,
char __user * __user *,
struct pt_regs *);
long sys_arch_prctl(int, unsigned long);
/* kernel/ioport.c */
asmlinkage long sys_iopl(unsigned int, struct pt_regs *);
......
......@@ -38,22 +38,30 @@ extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;
enum xen_domain_type {
XEN_NATIVE,
XEN_PV_DOMAIN,
XEN_HVM_DOMAIN,
XEN_NATIVE, /* running on bare hardware */
XEN_PV_DOMAIN, /* running in a PV domain */
XEN_HVM_DOMAIN, /* running in a Xen hvm domain */
};
extern enum xen_domain_type xen_domain_type;
#ifdef CONFIG_XEN
#define xen_domain() (xen_domain_type != XEN_NATIVE)
extern enum xen_domain_type xen_domain_type;
#else
#define xen_domain() (0)
#define xen_domain_type XEN_NATIVE
#endif
#define xen_pv_domain() (xen_domain() && xen_domain_type == XEN_PV_DOMAIN)
#define xen_hvm_domain() (xen_domain() && xen_domain_type == XEN_HVM_DOMAIN)
#define xen_domain() (xen_domain_type != XEN_NATIVE)
#define xen_pv_domain() (xen_domain() && \
xen_domain_type == XEN_PV_DOMAIN)
#define xen_hvm_domain() (xen_domain() && \
xen_domain_type == XEN_HVM_DOMAIN)
#ifdef CONFIG_XEN_DOM0
#include <xen/interface/xen.h>
#define xen_initial_domain() (xen_pv_domain() && xen_start_info->flags & SIF_INITDOMAIN)
#define xen_initial_domain() (xen_pv_domain() && \
xen_start_info->flags & SIF_INITDOMAIN)
#else /* !CONFIG_XEN_DOM0 */
#define xen_initial_domain() (0)
#endif /* CONFIG_XEN_DOM0 */
#endif /* _ASM_X86_XEN_HYPERVISOR_H */
......@@ -58,13 +58,12 @@ obj-$(CONFIG_PCI) += early-quirks.o
apm-y := apm_32.o
obj-$(CONFIG_APM) += apm.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP) += smpboot.o tsc_sync.o ipi.o
obj-$(CONFIG_SMP) += smpboot.o tsc_sync.o
obj-$(CONFIG_SMP) += setup_percpu.o
obj-$(CONFIG_X86_64_SMP) += tsc_sync.o
obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o
obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o ipi.o
obj-$(CONFIG_X86_IO_APIC) += io_apic.o
obj-$(CONFIG_X86_LOCAL_APIC) += apic/
obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
......@@ -116,17 +115,13 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o # NB rename without _64
###
# 64 bit specific files
ifeq ($(CONFIG_X86_64),y)
obj-y += genapic_64.o genapic_flat_64.o
obj-y += genx2apic_cluster.o
obj-y += genx2apic_phys.o
obj-$(CONFIG_X86_UV) += genx2apic_uv_x.o tlb_uv.o
obj-$(CONFIG_X86_UV) += bios_uv.o uv_irq.o uv_sysfs.o
obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o
obj-$(CONFIG_AUDIT) += audit_64.o
obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o
obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o
obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o
obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o
obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o
obj-$(CONFIG_AUDIT) += audit_64.o
obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o
obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o
obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o
endif
......@@ -37,7 +37,6 @@
#include <asm/pgtable.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/genapic.h>
#include <asm/io.h>
#include <asm/mpspec.h>
#include <asm/smp.h>
......
#
# Makefile for local APIC drivers and for the IO-APIC code
#
obj-y := apic.o ipi.o nmi.o
obj-$(CONFIG_X86_IO_APIC) += io_apic.o
obj-$(CONFIG_SMP) += ipi.o
ifeq ($(CONFIG_X86_64),y)
obj-y += apic_64.o apic_flat_64.o
obj-$(CONFIG_X86_X2APIC) += x2apic_cluster.o
obj-$(CONFIG_X86_X2APIC) += x2apic_phys.o
obj-$(CONFIG_X86_UV) += x2apic_uv_x.o
endif
......@@ -37,7 +37,6 @@
#include <asm/perf_counter.h>
#include <asm/arch_hooks.h>
#include <asm/pgalloc.h>
#include <asm/genapic.h>
#include <asm/atomic.h>
#include <asm/mpspec.h>
#include <asm/i8253.h>
......@@ -113,11 +112,7 @@ static __init int setup_apicpmtimer(char *s)
__setup("apicpmtimer", setup_apicpmtimer);
#endif
#ifdef CONFIG_X86_64
#define HAVE_X2APIC
#endif
#ifdef HAVE_X2APIC
#ifdef CONFIG_X86_X2APIC
int x2apic;
/* x2apic enabled before OS handover */
static int x2apic_preenabled;
......@@ -215,18 +210,13 @@ static int modern_apic(void)
return lapic_get_version() >= 0x14;
}
/*
* Paravirt kernels also might be using these below ops. So we still
* use generic apic_read()/apic_write(), which might be pointing to different
* ops in PARAVIRT case.
*/
void xapic_wait_icr_idle(void)
void native_apic_wait_icr_idle(void)
{
while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
cpu_relax();
}
u32 safe_xapic_wait_icr_idle(void)
u32 native_safe_apic_wait_icr_idle(void)
{
u32 send_status;
int timeout;
......@@ -242,13 +232,13 @@ u32 safe_xapic_wait_icr_idle(void)
return send_status;
}
void xapic_icr_write(u32 low, u32 id)
void native_apic_icr_write(u32 low, u32 id)
{
apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
apic_write(APIC_ICR, low);
}
static u64 xapic_icr_read(void)
u64 native_apic_icr_read(void)
{
u32 icr1, icr2;
......@@ -258,54 +248,6 @@ static u64 xapic_icr_read(void)
return icr1 | ((u64)icr2 << 32);
}
static struct apic_ops xapic_ops = {
.read = native_apic_mem_read,
.write = native_apic_mem_write,
.icr_read = xapic_icr_read,
.icr_write = xapic_icr_write,
.wait_icr_idle = xapic_wait_icr_idle,
.safe_wait_icr_idle = safe_xapic_wait_icr_idle,
};
struct apic_ops __read_mostly *apic_ops = &xapic_ops;
EXPORT_SYMBOL_GPL(apic_ops);
#ifdef HAVE_X2APIC
static void x2apic_wait_icr_idle(void)
{
/* no need to wait for icr idle in x2apic */
return;
}
static u32 safe_x2apic_wait_icr_idle(void)
{
/* no need to wait for icr idle in x2apic */
return 0;
}
void x2apic_icr_write(u32 low, u32 id)
{
wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
}
static u64 x2apic_icr_read(void)
{
unsigned long val;
rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
return val;
}
static struct apic_ops x2apic_ops = {
.read = native_apic_msr_read,
.write = native_apic_msr_write,
.icr_read = x2apic_icr_read,
.icr_write = x2apic_icr_write,
.wait_icr_idle = x2apic_wait_icr_idle,
.safe_wait_icr_idle = safe_x2apic_wait_icr_idle,
};
#endif
/**
* enable_NMI_through_LVT0 - enable NMI through local vector table 0
*/
......@@ -1324,17 +1266,19 @@ void __cpuinit end_local_APIC_setup(void)
apic_pm_activate();
}
#ifdef HAVE_X2APIC
#ifdef CONFIG_X86_X2APIC
void check_x2apic(void)
{
int msr, msr2;
if (!cpu_has_x2apic)
return;
rdmsr(MSR_IA32_APICBASE, msr, msr2);
if (msr & X2APIC_ENABLE) {
pr_info("x2apic enabled by BIOS, switching to x2apic ops\n");
x2apic_preenabled = x2apic = 1;
apic_ops = &x2apic_ops;
}
}
......@@ -1342,6 +1286,9 @@ void enable_x2apic(void)
{
int msr, msr2;
if (!x2apic)
return;
rdmsr(MSR_IA32_APICBASE, msr, msr2);
if (!(msr & X2APIC_ENABLE)) {
pr_info("Enabling x2apic\n");
......@@ -1405,7 +1352,6 @@ void __init enable_IR_x2apic(void)
if (!x2apic) {
x2apic = 1;
apic_ops = &x2apic_ops;
enable_x2apic();
}
......@@ -1443,7 +1389,7 @@ void __init enable_IR_x2apic(void)
return;
}
#endif /* HAVE_X2APIC */
#endif /* CONFIG_X86_X2APIC */
#ifdef CONFIG_X86_64
/*
......@@ -1574,7 +1520,7 @@ void __init early_init_lapic_mapping(void)
*/
void __init init_apic_mappings(void)
{
#ifdef HAVE_X2APIC
#ifdef CONFIG_X86_X2APIC
if (x2apic) {
boot_cpu_physical_apicid = read_apic_id();
return;
......@@ -1638,9 +1584,7 @@ int __init APIC_init_uniprocessor(void)
}
#endif
#ifdef HAVE_X2APIC
enable_IR_x2apic();
#endif
#ifdef CONFIG_X86_64
default_setup_apic_routing();
#endif
......@@ -1663,35 +1607,31 @@ int __init APIC_init_uniprocessor(void)
physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
setup_local_APIC();
#ifdef CONFIG_X86_64
#ifdef CONFIG_X86_IO_APIC
/*
* Now enable IO-APICs, actually call clear_IO_APIC
* We need clear_IO_APIC before enabling vector on BP
* We need clear_IO_APIC before enabling error vector
*/
if (!skip_ioapic_setup && nr_ioapics)
enable_IO_APIC();
#endif
#ifdef CONFIG_X86_IO_APIC
if (!smp_found_config || skip_ioapic_setup || !nr_ioapics)
#endif
localise_nmi_watchdog();
end_local_APIC_setup();
#ifdef CONFIG_X86_IO_APIC
if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
setup_IO_APIC();
# ifdef CONFIG_X86_64
else
else {
nr_ioapics = 0;
# endif
localise_nmi_watchdog();
}
#else
localise_nmi_watchdog();
#endif
setup_boot_clock();
#ifdef CONFIG_X86_64
setup_boot_APIC_clock();
check_nmi_watchdog();
#else
setup_boot_clock();
#endif
return 0;
......@@ -2029,7 +1969,7 @@ static int lapic_resume(struct sys_device *dev)
local_irq_save(flags);
#ifdef HAVE_X2APIC
#ifdef CONFIG_X86_X2APIC
if (x2apic)
enable_x2apic();
else
......
......@@ -19,24 +19,27 @@
#include <linux/dmar.h>
#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
#include <asm/setup.h>
extern struct genapic apic_flat;
extern struct genapic apic_physflat;
extern struct genapic apic_x2xpic_uv_x;
extern struct genapic apic_x2apic_phys;
extern struct genapic apic_x2apic_cluster;
extern struct apic apic_flat;
extern struct apic apic_physflat;
extern struct apic apic_x2xpic_uv_x;
extern struct apic apic_x2apic_phys;
extern struct apic apic_x2apic_cluster;
struct genapic __read_mostly *apic = &apic_flat;
struct apic __read_mostly *apic = &apic_flat;
EXPORT_SYMBOL_GPL(apic);
static struct genapic *apic_probe[] __initdata = {
static struct apic *apic_probe[] __initdata = {
#ifdef CONFIG_X86_UV
&apic_x2apic_uv_x,
#endif
#ifdef CONFIG_X86_X2APIC
&apic_x2apic_phys,
&apic_x2apic_cluster,
#endif
&apic_physflat,
NULL,
};
......@@ -46,10 +49,12 @@ static struct genapic *apic_probe[] __initdata = {
*/
void __init default_setup_apic_routing(void)
{
#ifdef CONFIG_X86_X2APIC
if (apic == &apic_x2apic_phys || apic == &apic_x2apic_cluster) {
if (!intr_remapping_enabled)
apic = &apic_flat;
}
#endif
if (apic == &apic_flat) {
if (max_physical_apicid >= 8)
......@@ -57,8 +62,8 @@ void __init default_setup_apic_routing(void)
printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
}
if (x86_quirks->update_genapic)
x86_quirks->update_genapic();
if (x86_quirks->update_apic)
x86_quirks->update_apic();
}
/* Same for both flat and physical. */
......
......@@ -17,8 +17,8 @@
#include <linux/init.h>
#include <linux/hardirq.h>
#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
......@@ -178,7 +178,7 @@ static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
return hard_smp_processor_id() >> index_msb;
}
struct genapic apic_flat = {
struct apic apic_flat = {
.name = "flat",
.probe = NULL,
.acpi_madt_oem_check = flat_acpi_madt_oem_check,
......@@ -227,8 +227,14 @@ struct genapic apic_flat = {
.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
.wait_for_init_deassert = NULL,
.smp_callin_clear_local_apic = NULL,
.store_NMI_vector = NULL,
.inquire_remote_apic = NULL,
.read = native_apic_mem_read,
.write = native_apic_mem_write,
.icr_read = native_apic_icr_read,
.icr_write = native_apic_icr_write,
.wait_icr_idle = native_apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
};
/*
......@@ -321,7 +327,7 @@ physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
return BAD_APICID;
}
struct genapic apic_physflat = {
struct apic apic_physflat = {
.name = "physical flat",
.probe = NULL,
......@@ -372,6 +378,12 @@ struct genapic apic_physflat = {
.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
.wait_for_init_deassert = NULL,
.smp_callin_clear_local_apic = NULL,
.store_NMI_vector = NULL,
.inquire_remote_apic = NULL,
.read = native_apic_mem_read,
.write = native_apic_mem_write,
.icr_read = native_apic_icr_read,
.icr_write = native_apic_icr_write,
.wait_icr_idle = native_apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
};
......@@ -62,7 +62,7 @@
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_irq.h>
#include <asm/genapic.h>
#include <asm/apic.h>
#define __apicdebuginit(type) static type __init
......@@ -813,8 +813,9 @@ static void clear_IO_APIC (void)
*/
#define MAX_PIRQS 8
static int pirq_entries [MAX_PIRQS];
static int pirqs_enabled;
static int pirq_entries[MAX_PIRQS] = {
[0 ... MAX_PIRQS - 1] = -1
};
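The new initializer above uses GCC's designated range syntax ([first ... last] = value) to fill the whole array with -1 at compile time, which is what allows the runtime pre-fill loop and the pirqs_enabled flag to be dropped further down. A minimal standalone sketch of the construct:

#include <stdio.h>

#define MAX_PIRQS 8

/* GCC extension: every element in the range gets the same initializer */
static int pirq_entries[MAX_PIRQS] = {
	[0 ... MAX_PIRQS - 1] = -1
};

int main(void)
{
	int i;

	for (i = 0; i < MAX_PIRQS; i++)
		printf("pirq_entries[%d] = %d\n", i, pirq_entries[i]);
	return 0;
}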
static int __init ioapic_pirq_setup(char *str)
{
......@@ -823,10 +824,6 @@ static int __init ioapic_pirq_setup(char *str)
get_options(str, ARRAY_SIZE(ints), ints);
for (i = 0; i < MAX_PIRQS; i++)
pirq_entries[i] = -1;
pirqs_enabled = 1;
apic_printk(APIC_VERBOSE, KERN_INFO
"PIRQ redirection, working around broken MP-BIOS.\n");
max = MAX_PIRQS;
......@@ -1976,13 +1973,6 @@ void __init enable_IO_APIC(void)
int apic;
unsigned long flags;
#ifdef CONFIG_X86_32
int i;
if (!pirqs_enabled)
for (i = 0; i < MAX_PIRQS; i++)
pirq_entries[i] = -1;
#endif
/*
* The number of IO-APIC IRQ registers (== #pins):
*/
......@@ -3057,13 +3047,9 @@ static inline void __init check_timer(void)
void __init setup_IO_APIC(void)
{
#ifdef CONFIG_X86_32
enable_IO_APIC();
#else
/*
* calling enable_IO_APIC() is moved to setup_local_APIC for BP
*/
#endif
io_apic_irqs = ~PIC_IRQS;
......
......@@ -7,8 +7,8 @@
#include <linux/dmar.h>
#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
......@@ -46,7 +46,7 @@ static void
/*
* send the IPI.
*/
x2apic_icr_write(cfg, apicid);
native_x2apic_icr_write(cfg, apicid);
}
/*
......@@ -182,7 +182,7 @@ static void init_x2apic_ldr(void)
per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
}
struct genapic apic_x2apic_cluster = {
struct apic apic_x2apic_cluster = {
.name = "cluster x2apic",
.probe = NULL,
......@@ -232,6 +232,12 @@ struct genapic apic_x2apic_cluster = {
.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
.wait_for_init_deassert = NULL,
.smp_callin_clear_local_apic = NULL,
.store_NMI_vector = NULL,
.inquire_remote_apic = NULL,
.read = native_apic_msr_read,
.write = native_apic_msr_write,
.icr_read = native_x2apic_icr_read,
.icr_write = native_x2apic_icr_write,
.wait_icr_idle = native_x2apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
};
......@@ -7,8 +7,8 @@
#include <linux/dmar.h>
#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
static int x2apic_phys;
......@@ -50,7 +50,7 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
/*
* send the IPI.
*/
x2apic_icr_write(cfg, apicid);
native_x2apic_icr_write(cfg, apicid);
}
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
......@@ -168,7 +168,7 @@ static void init_x2apic_ldr(void)
{
}
struct genapic apic_x2apic_phys = {
struct apic apic_x2apic_phys = {
.name = "physical x2apic",
.probe = NULL,
......@@ -218,6 +218,12 @@ struct genapic apic_x2apic_phys = {
.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
.wait_for_init_deassert = NULL,
.smp_callin_clear_local_apic = NULL,
.store_NMI_vector = NULL,
.inquire_remote_apic = NULL,
.read = native_apic_msr_read,
.write = native_apic_msr_write,
.icr_read = native_x2apic_icr_read,
.icr_write = native_x2apic_icr_write,
.wait_icr_idle = native_x2apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
};
......@@ -22,8 +22,8 @@
#include <linux/proc_fs.h>
#include <asm/current.h>
#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
#include <asm/pgtable.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
......@@ -114,16 +114,15 @@ int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
static void uv_send_IPI_one(int cpu, int vector)
{
unsigned long val, apicid, lapicid;
unsigned long val, apicid;
int pnode;
apicid = per_cpu(x86_cpu_to_apicid, cpu);
lapicid = apicid & 0x3f; /* ZZZ macro needed */
pnode = uv_apicid_to_pnode(apicid);
val = ( 1UL << UVH_IPI_INT_SEND_SHFT ) |
( lapicid << UVH_IPI_INT_APIC_ID_SHFT ) |
( vector << UVH_IPI_INT_VECTOR_SHFT );
val = (1UL << UVH_IPI_INT_SEND_SHFT) |
(apicid << UVH_IPI_INT_APIC_ID_SHFT) |
(vector << UVH_IPI_INT_VECTOR_SHFT);
uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}
......@@ -241,7 +240,7 @@ static void uv_send_IPI_self(int vector)
apic_write(APIC_SELF_IPI, vector);
}
struct genapic apic_x2apic_uv_x = {
struct apic apic_x2apic_uv_x = {
.name = "UV large system",
.probe = NULL,
......@@ -291,8 +290,14 @@ struct genapic apic_x2apic_uv_x = {
.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
.wait_for_init_deassert = NULL,
.smp_callin_clear_local_apic = NULL,
.store_NMI_vector = NULL,
.inquire_remote_apic = NULL,
.read = native_apic_msr_read,
.write = native_apic_msr_write,
.icr_read = native_x2apic_icr_read,
.icr_write = native_x2apic_icr_write,
.wait_icr_idle = native_x2apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
};
static __cpuinit void set_x2apic_extra_bits(int pnode)
......
/*
* APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs.
* APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
*
* Drives the local APIC in "clustered mode".
*/
#define APIC_DEFINITION 1
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <asm/mpspec.h>
#include <asm/genapic.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/ipi.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/smp.h>
#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/ipi.h>
static inline unsigned bigsmp_get_apic_id(unsigned long x)
{
return (x >> 24) & 0xFF;
}
#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
static inline int bigsmp_apic_id_registered(void)
{
return 1;
......@@ -37,8 +35,6 @@ static inline const cpumask_t *bigsmp_target_cpus(void)
#endif
}
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
static inline unsigned long
bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
{
......@@ -53,9 +49,11 @@ static inline unsigned long bigsmp_check_apicid_present(int bit)
static inline unsigned long calculate_ldr(int cpu)
{
unsigned long val, id;
val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
id = xapic_phys_to_log_apicid(cpu);
id = per_cpu(x86_bios_cpu_apicid, cpu);
val |= SET_APIC_LOGICAL_ID(id);
return val;
}
......@@ -71,15 +69,16 @@ static inline void bigsmp_init_apic_ldr(void)
unsigned long val;
int cpu = smp_processor_id();
apic_write(APIC_DFR, APIC_DFR_VALUE);
apic_write(APIC_DFR, APIC_DFR_FLAT);
val = calculate_ldr(cpu);
apic_write(APIC_LDR, val);
}
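In the bigsmp hunks above, calculate_ldr() now reads the logical id straight from per_cpu(x86_bios_cpu_apicid, cpu) and merges it into the logical-ID byte of the LDR, and bigsmp_init_apic_ldr() programs DFR flat before writing that value. A standalone sketch of the LDR computation follows; the APIC_LDR_MASK and SET_APIC_LOGICAL_ID() definitions are the conventional xAPIC ones and are assumed here, not taken from this diff.

#include <stdint.h>
#include <stdio.h>

/* Assumed conventional definitions (not part of this commit):
 *   APIC_LDR_MASK          = 0xFF000000  -- logical-ID byte of the LDR
 *   SET_APIC_LOGICAL_ID(x) = (x) << 24
 * The current LDR is read, its logical-ID byte cleared, and the CPU's
 * BIOS APIC id written into that byte. */
#define APIC_LDR_MASK            0xFF000000u
#define SET_APIC_LOGICAL_ID(x)   ((uint32_t)(x) << 24)

static uint32_t calc_ldr(uint32_t current_ldr, uint8_t bios_apicid)
{
        uint32_t val = current_ldr & ~APIC_LDR_MASK;

        val |= SET_APIC_LOGICAL_ID(bios_apicid);
        return val;
}

int main(void)
{
        printf("LDR for apicid 5: 0x%08x\n", calc_ldr(0, 5));
        return 0;
}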
static inline void bigsmp_setup_apic_routing(void)
{
printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
"Physflat", nr_ioapics);
printk(KERN_INFO
"Enabling APIC mode: Physflat. Using %d I/O APICs\n",
nr_ioapics);
}
static inline int bigsmp_apicid_to_node(int logical_apicid)
......@@ -100,7 +99,6 @@ static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid)
return physid_mask_of_physid(phys_apicid);
}
extern u8 cpu_2_logical_apicid[];
/* Mapping from cpu number to logical apicid */
static inline int bigsmp_cpu_to_logical_apicid(int cpu)
{
......@@ -176,21 +174,24 @@ static int hp_ht_bigsmp(const struct dmi_system_id *d)
{
printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
dmi_bigsmp = 1;
return 0;
}
static const struct dmi_system_id bigsmp_dmi_table[] = {
{ hp_ht_bigsmp, "HP ProLiant DL760 G2",
{ DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
DMI_MATCH(DMI_BIOS_VERSION, "P44-"),}
{ DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
}
},
{ hp_ht_bigsmp, "HP ProLiant DL740",
{ DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
DMI_MATCH(DMI_BIOS_VERSION, "P47-"),}
{ DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
}
},
{ }
{ } /* NULL entry stops DMI scanning */
};
static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask)
......@@ -205,10 +206,11 @@ static int probe_bigsmp(void)
dmi_bigsmp = 1;
else
dmi_check_system(bigsmp_dmi_table);
return dmi_bigsmp;
}
struct genapic apic_bigsmp = {
struct apic apic_bigsmp = {
.name = "bigsmp",
.probe = probe_bigsmp,
......@@ -261,6 +263,12 @@ struct genapic apic_bigsmp = {
.wait_for_init_deassert = default_wait_for_init_deassert,
.smp_callin_clear_local_apic = NULL,
.store_NMI_vector = NULL,
.inquire_remote_apic = default_inquire_remote_apic,
.read = native_apic_mem_read,
.write = native_apic_mem_write,
.icr_read = native_apic_icr_read,
.icr_write = native_apic_icr_write,
.wait_icr_idle = native_apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
};
......@@ -7,7 +7,7 @@
#include <asm/pat.h>
#include <asm/processor.h>
#include <asm/genapic.h>
#include <asm/apic.h>
struct cpuid_bit {
u16 feature;
......
......@@ -12,8 +12,6 @@
# include <asm/cacheflush.h>
#endif
#include <asm/genapic.h>
#include "cpu.h"
#ifdef CONFIG_X86_32
......
......@@ -24,11 +24,9 @@
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/cpumask.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/genapic.h>
#include <asm/genapic.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif
......@@ -255,9 +253,9 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
* signs here...
*/
if (cpu_has(c, df->feature) &&
((s32)df->feature < 0 ?
(u32)df->feature > (u32)c->extended_cpuid_level :
(s32)df->feature > (s32)c->cpuid_level)) {
((s32)df->level < 0 ?
(u32)df->level > (u32)c->extended_cpuid_level :
(s32)df->level > (s32)c->cpuid_level)) {
clear_cpu_cap(c, df->feature);
if (warn)
printk(KERN_WARNING
......@@ -267,7 +265,7 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
df->level);
}
}
}
}
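The filter_cpuid_features() fix above compares df->level (rather than df->feature) against the CPU's reported CPUID levels: a level that is negative when viewed as s32 is an extended leaf (0x80000000 and up) and is checked against extended_cpuid_level, while basic leaves are checked against cpuid_level. Below is a standalone sketch of that signed/unsigned routing; the struct and helper names are hypothetical, and it reports whether a leaf is supported rather than clearing a feature bit.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical names; only the comparison trick mirrors the fixed check.
 * A leaf >= 0x80000000 is negative as int32_t, so it is routed to the
 * unsigned comparison against the extended level. */
struct cpu_levels {
        uint32_t cpuid_level;           /* highest basic leaf reported    */
        uint32_t extended_cpuid_level;  /* highest extended leaf reported */
};

static int leaf_supported(const struct cpu_levels *c, uint32_t level)
{
        if ((int32_t)level < 0)         /* extended leaf (0x80000000+) */
                return (uint32_t)level <= c->extended_cpuid_level;
        return (int32_t)level <= (int32_t)c->cpuid_level;
}

int main(void)
{
        struct cpu_levels c = { .cpuid_level = 0xd,
                                .extended_cpuid_level = 0x80000008 };

        printf("leaf 0x00000007 supported: %d\n", leaf_supported(&c, 0x00000007));
        printf("leaf 0x8000000a supported: %d\n", leaf_supported(&c, 0x8000000a));
        return 0;
}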
/*
* Naming convention should be: <Name> [(<Codename>)]
......@@ -1053,7 +1051,7 @@ void __cpuinit cpu_init(void)
barrier();
check_efer();
if (cpu != 0 && x2apic)
if (cpu != 0)
enable_x2apic();
/*
......
......@@ -24,7 +24,6 @@
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/genapic.h>
#endif
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
......
......@@ -28,8 +28,6 @@
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/genapic.h>
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
......
......@@ -212,7 +212,6 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
}
#ifdef CONFIG_HOTPLUG_CPU
#include <asm/genapic.h>
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
......
......@@ -46,7 +46,7 @@
#include <asm/apicdef.h>
#include <asm/system.h>
#include <asm/genapic.h>
#include <asm/apic.h>
/*
* Put the error code here just in case the user cares:
......
......@@ -29,7 +29,7 @@
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/genapic.h>
#include <asm/apic.h>
/*
* Checksum an MP configuration block.
*/
......
......@@ -203,7 +203,7 @@ static void __init platform_detect(void)
static void __init platform_detect(void)
{
/* stopgap until OFW support is added to the kernel */
olpc_platform_info.boardrev = 0xc2;
olpc_platform_info.boardrev = olpc_board(0xc2);
}
#endif
......
......@@ -8,6 +8,7 @@
*/
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
......@@ -16,20 +17,18 @@
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/genapic.h>
#include <asm/apic.h>
#include <asm/setup.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <asm/mpspec.h>
#include <asm/genapic.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <asm/genapic.h>
#include <asm/ipi.h>
#include <linux/smp.h>
......@@ -40,8 +39,6 @@
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/genapic.h>
#ifdef CONFIG_HOTPLUG_CPU
#define DEFAULT_SEND_IPI (1)
#else
......@@ -52,6 +49,15 @@ int no_broadcast = DEFAULT_SEND_IPI;
#ifdef CONFIG_X86_LOCAL_APIC
void default_setup_apic_routing(void)
{
#ifdef CONFIG_X86_IO_APIC
printk(KERN_INFO
"Enabling APIC mode: Flat. Using %d I/O APICs\n",
nr_ioapics);
#endif
}
static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
/*
......@@ -72,7 +78,7 @@ static int probe_default(void)
return 1;
}
struct genapic apic_default = {
struct apic apic_default = {
.name = "default",
.probe = probe_default,
......@@ -125,19 +131,26 @@ struct genapic apic_default = {
.wait_for_init_deassert = default_wait_for_init_deassert,
.smp_callin_clear_local_apic = NULL,
.store_NMI_vector = NULL,
.inquire_remote_apic = default_inquire_remote_apic,
.read = native_apic_mem_read,
.write = native_apic_mem_write,
.icr_read = native_apic_icr_read,
.icr_write = native_apic_icr_write,
.wait_icr_idle = native_apic_wait_icr_idle,
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
};
extern struct genapic apic_numaq;
extern struct genapic apic_summit;
extern struct genapic apic_bigsmp;
extern struct genapic apic_es7000;
extern struct genapic apic_default;
extern struct apic apic_numaq;
extern struct apic apic_summit;
extern struct apic apic_bigsmp;
extern struct apic apic_es7000;
extern struct apic apic_default;
struct genapic *apic = &apic_default;
struct apic *apic = &apic_default;
EXPORT_SYMBOL_GPL(apic);
static struct genapic *apic_probe[] __initdata = {
static struct apic *apic_probe[] __initdata = {
#ifdef CONFIG_X86_NUMAQ
&apic_numaq,
#endif
......@@ -170,8 +183,8 @@ static int __init parse_apic(char *arg)
}
}
if (x86_quirks->update_genapic)
x86_quirks->update_genapic();
if (x86_quirks->update_apic)
x86_quirks->update_apic();
/* Parsed again by __setup for debug/verbose */
return 0;
......@@ -191,8 +204,8 @@ void __init generic_bigsmp_probe(void)
if (!cmdline_apic && apic == &apic_default) {
if (apic_bigsmp.probe()) {
apic = &apic_bigsmp;
if (x86_quirks->update_genapic)
x86_quirks->update_genapic();
if (x86_quirks->update_apic)
x86_quirks->update_apic();
printk(KERN_INFO "Overriding APIC driver with %s\n",
apic->name);
}
......@@ -214,8 +227,8 @@ void __init generic_apic_probe(void)
if (!apic_probe[i])
panic("Didn't find an APIC driver");
if (x86_quirks->update_genapic)
x86_quirks->update_genapic();
if (x86_quirks->update_apic)
x86_quirks->update_apic();
}
printk(KERN_INFO "Using APIC driver %s\n", apic->name);
}
......@@ -235,8 +248,8 @@ generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
if (!cmdline_apic) {
apic = apic_probe[i];
if (x86_quirks->update_genapic)
x86_quirks->update_genapic();
if (x86_quirks->update_apic)
x86_quirks->update_apic();
printk(KERN_INFO "Switched to APIC driver `%s'.\n",
apic->name);
}
......@@ -257,8 +270,8 @@ int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
if (!cmdline_apic) {
apic = apic_probe[i];
if (x86_quirks->update_genapic)
x86_quirks->update_genapic();
if (x86_quirks->update_apic)
x86_quirks->update_apic();
printk(KERN_INFO "Switched to APIC driver `%s'.\n",
apic->name);
}
......
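The probe_32.c hunks above show the renamed driver table in action: apic starts out pointing at apic_default, the entries of apic_probe[] are tried in order (by the apic= parameter, generic_apic_probe(), and the MPS/ACPI OEM checks), and x86_quirks->update_apic() runs after every driver switch. A minimal userspace sketch of that function-pointer/probe pattern follows; the names are hypothetical, not the kernel symbols.

#include <stdio.h>

/* Sketch only: each driver exposes a probe() hook, the first one that
 * returns non-zero becomes the active driver, and an optional quirk
 * callback runs after the switch. */
struct fake_apic {
        const char *name;
        int (*probe)(void);
};

static int probe_bigsmp(void)  { return 0; }    /* would check DMI/CPU count */
static int probe_default(void) { return 1; }    /* always matches, tried last */

static struct fake_apic apic_bigsmp_drv  = { "bigsmp",  probe_bigsmp  };
static struct fake_apic apic_default_drv = { "default", probe_default };

static struct fake_apic *drivers[] = { &apic_bigsmp_drv, &apic_default_drv, NULL };
static struct fake_apic *active    = &apic_default_drv;

static void update_apic_quirk(void) { /* e.g. patch a wakeup routine */ }

int main(void)
{
        for (int i = 0; drivers[i]; i++) {
                if (drivers[i]->probe()) {
                        active = drivers[i];
                        update_apic_quirk();
                        break;
                }
        }
        printf("Using APIC driver %s\n", active->name);
        return 0;
}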
......@@ -24,8 +24,6 @@
# include <asm/iommu.h>
#endif
#include <asm/genapic.h>
/*
* Power off function, if any
*/
......
......@@ -97,7 +97,6 @@
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/genapic.h>
#include <asm/paravirt.h>
#include <asm/hypervisor.h>
......@@ -600,7 +599,7 @@ static int __init setup_elfcorehdr(char *arg)
early_param("elfcorehdr", setup_elfcorehdr);
#endif
static int __init default_update_genapic(void)
static int __init default_update_apic(void)
{
#ifdef CONFIG_SMP
if (!apic->wakeup_cpu)
......@@ -611,7 +610,7 @@ static int __init default_update_genapic(void)
}
static struct x86_quirks default_x86_quirks __initdata = {
.update_genapic = default_update_genapic,
.update_apic = default_update_apic,
};
struct x86_quirks *x86_quirks __initdata = &default_x86_quirks;
......@@ -836,8 +835,7 @@ void __init setup_arch(char **cmdline_p)
#else
num_physpages = max_pfn;
if (cpu_has_x2apic)
check_x2apic();
check_x2apic();
/* How many end-of-memory variables you have, grandma! */
/* need this before calling reserve_initrd */
......
......@@ -26,7 +26,7 @@
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/genapic.h>
#include <asm/apic.h>
/*
* Some notes on x86 processor bugs affecting SMP operation:
*
......
......@@ -60,12 +60,11 @@
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/vmi.h>
#include <asm/genapic.h>
#include <asm/apic.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
#include <asm/genapic.h>
#include <asm/smpboot_hooks.h>
#ifdef CONFIG_X86_32
......@@ -746,21 +745,21 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
complete(&c_idle->done);
}
static int __cpuinit do_boot_cpu(int apicid, int cpu)
/*
* NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
* (ie clustered apic addressing mode), this is a LOGICAL apic ID.
* Returns zero if CPU booted OK, else error code from ->wakeup_cpu.
*/
static int __cpuinit do_boot_cpu(int apicid, int cpu)
{
unsigned long boot_error = 0;
int timeout;
unsigned long start_ip;
unsigned short nmi_high = 0, nmi_low = 0;
int timeout;
struct create_idle c_idle = {
.cpu = cpu,
.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
.cpu = cpu,
.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
};
INIT_WORK(&c_idle.work, do_fork_idle);
alternatives_smp_switch(1);
......@@ -825,9 +824,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
pr_debug("Setting warm reset code and vector.\n");
if (apic->store_NMI_vector)
apic->store_NMI_vector(&nmi_high, &nmi_low);
smpboot_setup_warm_reset_vector(start_ip);
/*
* Be paranoid about clearing APIC errors.
......@@ -1128,8 +1124,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
current_thread_info()->cpu = 0; /* needed? */
set_cpu_sibling_map(0);
#ifdef CONFIG_X86_64
enable_IR_x2apic();
#ifdef CONFIG_X86_64
default_setup_apic_routing();
#endif
......@@ -1154,13 +1150,12 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
*/
setup_local_APIC();
#ifdef CONFIG_X86_64
/*
* Enable IO APIC before setting up error vector
*/
if (!skip_ioapic_setup && nr_ioapics)
enable_IO_APIC();
#endif
end_local_APIC_setup();
map_cpu_to_logical_apicid();
......