Commit d8470596 authored by Ingo Molnar

Merge branch 'x86/apic' into x86-v28-for-linus-phase4-B

Conflicts:
	arch/x86/kernel/apic_32.c
	arch/x86/kernel/apic_64.c
	arch/x86/kernel/setup.c
	drivers/pci/intel-iommu.c
	include/asm-x86/cpufeature.h
	include/asm-x86/dma-mapping.h
parents 725c2581 11494547
......@@ -1424,6 +1424,12 @@ and is between 256 and 4096 characters. It is defined in the file
nolapic_timer [X86-32,APIC] Do not use the local APIC timer.
nox2apic [X86-64,APIC] Do not enable x2APIC mode.
x2apic_phys [X86-64,APIC] Use x2apic physical mode instead of
default x2apic cluster mode on platforms
supporting x2apic.
noltlbs [PPC] Do not use large page/tlb entries for kernel
lowmem mapping on PPC40x.
......
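Note: these options are consumed from the kernel command line at boot. For illustration only, a hypothetical boot-loader entry forcing physical x2apic mode (the image path and root device are placeholders):

```
kernel /boot/vmlinuz ro root=/dev/sda1 x2apic_phys
```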
......@@ -41,12 +41,12 @@
#define stub_rt_sigreturn sys_rt_sigreturn
#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ;
#undef _ASM_X86_64_UNISTD_H_
#undef ASM_X86__UNISTD_64_H
#include <asm-x86/unistd_64.h>
#undef __SYSCALL
#define __SYSCALL(nr, sym) [ nr ] = sym,
#undef _ASM_X86_64_UNISTD_H_
#undef ASM_X86__UNISTD_64_H
typedef void (*sys_call_ptr_t)(void);
......
......@@ -1689,6 +1689,14 @@ config DMAR_FLOPPY_WA
workaround will setup a 1:1 mapping for the first
16M to make floppy (an ISA device) work.
config INTR_REMAP
bool "Support for Interrupt Remapping (EXPERIMENTAL)"
depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
help
Supports Interrupt remapping for IO-APIC and MSI devices.
To use x2apic mode on CPUs which support x2APIC enhancements, or
to support platforms with CPUs having > 8-bit APIC IDs, say Y.
source "drivers/pci/pcie/Kconfig"
source "drivers/pci/Kconfig"
......
......@@ -104,6 +104,8 @@ obj-$(CONFIG_OLPC) += olpc.o
ifeq ($(CONFIG_X86_64),y)
obj-y += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o
obj-y += bios_uv.o
obj-y += genx2apic_cluster.o
obj-y += genx2apic_phys.o
obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o
obj-$(CONFIG_AUDIT) += audit_64.o
......
......@@ -252,10 +252,8 @@ static void __cpuinit acpi_register_lapic(int id, u8 enabled)
return;
}
#ifdef CONFIG_X86_32
if (boot_cpu_physical_apicid != -1U)
ver = apic_version[boot_cpu_physical_apicid];
#endif
generic_processor_info(id, ver);
}
......@@ -774,11 +772,9 @@ static void __init acpi_register_lapic_address(unsigned long address)
set_fixmap_nocache(FIX_APIC_BASE, address);
if (boot_cpu_physical_apicid == -1U) {
boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
#ifdef CONFIG_X86_32
boot_cpu_physical_apicid = read_apic_id();
apic_version[boot_cpu_physical_apicid] =
GET_APIC_VERSION(apic_read(APIC_LVR));
#endif
}
}
......@@ -1350,7 +1346,9 @@ static void __init acpi_process_madt(void)
acpi_ioapic = 1;
smp_found_config = 1;
#ifdef CONFIG_X86_32
setup_apic_routing();
#endif
}
}
if (error == -EINVAL) {
......
......@@ -687,6 +687,8 @@ void __cpuinit cpu_init(void)
barrier();
check_efer();
if (cpu != 0 && x2apic)
enable_x2apic();
/*
* set up and load the per-CPU TSS
......
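Note: enable_x2apic() itself is not shown in this hunk. A minimal sketch of what it does, assuming the standard IA32_APIC_BASE layout and the X2APIC_ENABLE bit (1UL << 10) defined later in this merge:

```c
/* Sketch (not the verbatim kernel function): x2apic mode is entered by
 * setting the EXTD bit (X2APIC_ENABLE, bit 10) in IA32_APIC_BASE. */
static void enable_x2apic_sketch(void)
{
	u32 msr, msr2;

	rdmsr(MSR_IA32_APICBASE, msr, msr2);
	if (!(msr & X2APIC_ENABLE)) {
		printk(KERN_INFO "Enabling x2apic\n");
		wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
	}
}
```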
......@@ -46,7 +46,7 @@ const char * const x86_cap_flags[NCAPINTS*32] = {
/* Intel-defined (#2) */
"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
NULL, NULL, "dca", "sse4_1", "sse4_2", "x2apic", NULL, "popcnt",
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* VIA/Cyrix/Centaur-defined */
......
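The "x2apic" string lands in /proc/cpuinfo because word 4 of x86_capability mirrors CPUID.1:ECX, and bit 21 is the x2APIC feature (see the X86_FEATURE_X2APIC hunk below). A self-contained sketch of the same test using the kernel's cpuid() helper:

```c
/* Sketch: cpu_has_x2apic boils down to CPUID leaf 1, ECX bit 21. */
static int has_x2apic(void)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(1, &eax, &ebx, &ecx, &edx);
	return (ecx >> 21) & 1;
}
```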
......@@ -16,87 +16,63 @@
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/dmar.h>
#include <asm/smp.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
DEFINE_PER_CPU(int, x2apic_extra_bits);
extern struct genapic apic_flat;
extern struct genapic apic_physflat;
extern struct genapic apic_x2apic_uv_x;
extern struct genapic apic_x2apic_phys;
extern struct genapic apic_x2apic_cluster;
struct genapic __read_mostly *genapic = &apic_flat;
static enum uv_system_type uv_system_type;
static struct genapic *apic_probe[] __initdata = {
&apic_x2apic_uv_x,
&apic_x2apic_phys,
&apic_x2apic_cluster,
&apic_physflat,
NULL,
};
/*
* Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
*/
void __init setup_apic_routing(void)
{
if (uv_system_type == UV_NON_UNIQUE_APIC)
genapic = &apic_x2apic_uv_x;
else
#ifdef CONFIG_ACPI
/*
* Quirk: some x86_64 machines can only use physical APIC mode
* regardless of how many processors are present (x86_64 ES7000
* is an example).
*/
if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID &&
(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL))
genapic = &apic_physflat;
else
#endif
if (max_physical_apicid < 8)
if (genapic == &apic_x2apic_phys || genapic == &apic_x2apic_cluster) {
if (!intr_remapping_enabled)
genapic = &apic_flat;
else
genapic = &apic_physflat;
}
if (genapic == &apic_flat) {
if (max_physical_apicid >= 8)
genapic = &apic_physflat;
printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
}
}
/* Same for both flat and physical. */
void send_IPI_self(int vector)
void apic_send_IPI_self(int vector)
{
__send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
}
int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
if (!strcmp(oem_id, "SGI")) {
if (!strcmp(oem_table_id, "UVL"))
uv_system_type = UV_LEGACY_APIC;
else if (!strcmp(oem_table_id, "UVX"))
uv_system_type = UV_X2APIC;
else if (!strcmp(oem_table_id, "UVH"))
uv_system_type = UV_NON_UNIQUE_APIC;
int i;
for (i = 0; apic_probe[i]; ++i) {
if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
genapic = apic_probe[i];
printk(KERN_INFO "Setting APIC routing to %s.\n",
genapic->name);
return 1;
}
}
return 0;
}
unsigned int read_apic_id(void)
{
unsigned int id;
WARN_ON(preemptible() && num_online_cpus() > 1);
id = apic_read(APIC_ID);
if (uv_system_type >= UV_X2APIC)
id |= __get_cpu_var(x2apic_extra_bits);
return id;
}
enum uv_system_type get_uv_system_type(void)
{
return uv_system_type;
}
int is_uv_system(void)
{
return uv_system_type != UV_NONE;
}
EXPORT_SYMBOL_GPL(is_uv_system);
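Design note: acpi_madt_oem_check() makes mode selection a driver-local decision; the first apic_probe[] entry to return 1 wins, and setup_apic_routing() above handles the no-match fallback. A purely hypothetical driver would plug in like this (all names here are illustrative, not part of this commit):

```c
/* Hypothetical example only: wiring an extra driver into apic_probe[].
 * "myplat" and the "MYPLAT" OEM ID do not exist in this merge. */
static int __init myplat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return !strcmp(oem_id, "MYPLAT");	/* claim only our platform */
}

struct genapic apic_myplat = {
	.name			= "myplat",
	.acpi_madt_oem_check	= myplat_acpi_madt_oem_check,
	/* remaining callbacks filled in as in apic_physflat above */
};

/* ...plus an &apic_myplat entry in apic_probe[] ahead of the fallbacks. */
```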
......@@ -15,9 +15,20 @@
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/hardirq.h>
#include <asm/smp.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
#include <mach_apicdef.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
static int __init flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
return 1;
}
static cpumask_t flat_target_cpus(void)
{
......@@ -95,9 +106,33 @@ static void flat_send_IPI_all(int vector)
__send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
}
static unsigned int get_apic_id(unsigned long x)
{
unsigned int id;
id = (((x)>>24) & 0xFFu);
return id;
}
static unsigned long set_apic_id(unsigned int id)
{
unsigned long x;
x = ((id & 0xFFu)<<24);
return x;
}
static unsigned int read_xapic_id(void)
{
unsigned int id;
id = get_apic_id(apic_read(APIC_ID));
return id;
}
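A quick worked check of the two accessors above (self-contained user-space sketch): the xAPIC ID occupies bits 31:24 of the APIC_ID register, so an ID of 5 round-trips through 0x05000000.

```c
#include <assert.h>

/* Stand-alone copies of the flat-mode accessors shown above. */
static unsigned int get_apic_id(unsigned long x)
{
	return (x >> 24) & 0xFFu;
}

static unsigned long set_apic_id(unsigned int id)
{
	return (unsigned long)(id & 0xFFu) << 24;
}

int main(void)
{
	assert(get_apic_id(0x05000000ul) == 5);	/* ID in bits 31:24 */
	assert(set_apic_id(5) == 0x05000000ul);
	return 0;
}
```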
static int flat_apic_id_registered(void)
{
return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map);
return physid_isset(read_xapic_id(), phys_cpu_present_map);
}
static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
......@@ -112,6 +147,7 @@ static unsigned int phys_pkg_id(int index_msb)
struct genapic apic_flat = {
.name = "flat",
.acpi_madt_oem_check = flat_acpi_madt_oem_check,
.int_delivery_mode = dest_LowestPrio,
.int_dest_mode = (APIC_DEST_LOGICAL != 0),
.target_cpus = flat_target_cpus,
......@@ -121,8 +157,12 @@ struct genapic apic_flat = {
.send_IPI_all = flat_send_IPI_all,
.send_IPI_allbutself = flat_send_IPI_allbutself,
.send_IPI_mask = flat_send_IPI_mask,
.send_IPI_self = apic_send_IPI_self,
.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
.phys_pkg_id = phys_pkg_id,
.get_apic_id = get_apic_id,
.set_apic_id = set_apic_id,
.apic_id_mask = (0xFFu<<24),
};
/*
......@@ -130,6 +170,21 @@ struct genapic apic_flat = {
* We cannot use logical delivery in this case because the mask
* overflows, so use physical mode.
*/
static int __init physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
#ifdef CONFIG_ACPI
/*
* Quirk: some x86_64 machines can only use physical APIC mode
* regardless of how many processors are present (x86_64 ES7000
* is an example).
*/
if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID &&
(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL))
return 1;
#endif
return 0;
}
static cpumask_t physflat_target_cpus(void)
{
......@@ -176,6 +231,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
struct genapic apic_physflat = {
.name = "physical flat",
.acpi_madt_oem_check = physflat_acpi_madt_oem_check,
.int_delivery_mode = dest_Fixed,
.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
.target_cpus = physflat_target_cpus,
......@@ -185,6 +241,10 @@ struct genapic apic_physflat = {
.send_IPI_all = physflat_send_IPI_all,
.send_IPI_allbutself = physflat_send_IPI_allbutself,
.send_IPI_mask = physflat_send_IPI_mask,
.send_IPI_self = apic_send_IPI_self,
.cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
.phys_pkg_id = phys_pkg_id,
.get_apic_id = get_apic_id,
.set_apic_id = set_apic_id,
.apic_id_mask = (0xFFu<<24),
};
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/dmar.h>
#include <asm/smp.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
static int __init x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
if (cpu_has_x2apic)
return 1;
return 0;
}
/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
static cpumask_t x2apic_target_cpus(void)
{
return cpumask_of_cpu(0);
}
/*
* for now each logical cpu is in its own vector allocation domain.
*/
static cpumask_t x2apic_vector_allocation_domain(int cpu)
{
cpumask_t domain = CPU_MASK_NONE;
cpu_set(cpu, domain);
return domain;
}
static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
unsigned int dest)
{
unsigned long cfg;
cfg = __prepare_ICR(0, vector, dest);
/*
* send the IPI.
*/
x2apic_icr_write(cfg, apicid);
}
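Note: x2apic_icr_write() (not shown in this hunk) replaces the xAPIC write-ICR2-then-ICR MMIO sequence with one 64-bit MSR write; a sketch assuming the architectural x2apic mapping of MSR 0x800 + (reg >> 4), which puts the ICR at MSR 0x830:

```c
/* Sketch of x2apic_icr_write(): destination ID in the high 32 bits,
 * command word in the low 32 bits, written in a single wrmsr.
 * The MSR number assumes the standard x2apic mapping (0x830 for ICR). */
static inline void x2apic_icr_write_sketch(u32 low, u32 id)
{
	wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((u64)id << 32) | low);
}
```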
/*
* for now, we send the IPIs one by one in the cpumask.
* TBD: Based on the cpu mask, we can send the IPIs to the cluster group
* at once. We have 16 cpus in a cluster. This will minimize IPI register
* writes.
*/
static void x2apic_send_IPI_mask(cpumask_t mask, int vector)
{
unsigned long flags;
unsigned long query_cpu;
local_irq_save(flags);
for_each_cpu_mask(query_cpu, mask) {
__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu),
vector, APIC_DEST_LOGICAL);
}
local_irq_restore(flags);
}
static void x2apic_send_IPI_allbutself(int vector)
{
cpumask_t mask = cpu_online_map;
cpu_clear(smp_processor_id(), mask);
if (!cpus_empty(mask))
x2apic_send_IPI_mask(mask, vector);
}
static void x2apic_send_IPI_all(int vector)
{
x2apic_send_IPI_mask(cpu_online_map, vector);
}
static int x2apic_apic_id_registered(void)
{
return 1;
}
static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
{
int cpu;
/*
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
cpu = first_cpu(cpumask);
if ((unsigned)cpu < NR_CPUS)
return per_cpu(x86_cpu_to_logical_apicid, cpu);
else
return BAD_APICID;
}
static unsigned int get_apic_id(unsigned long x)
{
unsigned int id;
id = x;
return id;
}
static unsigned long set_apic_id(unsigned int id)
{
unsigned long x;
x = id;
return x;
}
static unsigned int x2apic_read_id(void)
{
return apic_read(APIC_ID);
}
static unsigned int phys_pkg_id(int index_msb)
{
return x2apic_read_id() >> index_msb;
}
static void x2apic_send_IPI_self(int vector)
{
apic_write(APIC_SELF_IPI, vector);
}
static void init_x2apic_ldr(void)
{
int cpu = smp_processor_id();
per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
return;
}
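In x2apic mode APIC_LDR is read-only and derived by hardware from the x2APIC ID, which is why init_x2apic_ldr() just caches it. A sketch of the derivation as specified by the Intel SDM (16 CPUs per cluster):

```c
/* Sketch per the Intel SDM: logical x2APIC ID = cluster in bits 31:16,
 * one-hot member position in bits 15:0. */
static u32 x2apic_logical_id(u32 x2apic_id)
{
	return ((x2apic_id >> 4) << 16) | (1u << (x2apic_id & 0xF));
}
```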
struct genapic apic_x2apic_cluster = {
.name = "cluster x2apic",
.acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
.int_delivery_mode = dest_LowestPrio,
.int_dest_mode = (APIC_DEST_LOGICAL != 0),
.target_cpus = x2apic_target_cpus,
.vector_allocation_domain = x2apic_vector_allocation_domain,
.apic_id_registered = x2apic_apic_id_registered,
.init_apic_ldr = init_x2apic_ldr,
.send_IPI_all = x2apic_send_IPI_all,
.send_IPI_allbutself = x2apic_send_IPI_allbutself,
.send_IPI_mask = x2apic_send_IPI_mask,
.send_IPI_self = x2apic_send_IPI_self,
.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
.phys_pkg_id = phys_pkg_id,
.get_apic_id = get_apic_id,
.set_apic_id = set_apic_id,
.apic_id_mask = (0xFFFFFFFFu),
};
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/dmar.h>
#include <asm/smp.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
static int x2apic_phys;
static int set_x2apic_phys_mode(char *arg)
{
x2apic_phys = 1;
return 0;
}
early_param("x2apic_phys", set_x2apic_phys_mode);
static int __init x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
if (cpu_has_x2apic && x2apic_phys)
return 1;
return 0;
}
/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
static cpumask_t x2apic_target_cpus(void)
{
return cpumask_of_cpu(0);
}
static cpumask_t x2apic_vector_allocation_domain(int cpu)
{
cpumask_t domain = CPU_MASK_NONE;
cpu_set(cpu, domain);
return domain;
}
static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
unsigned int dest)
{
unsigned long cfg;
cfg = __prepare_ICR(0, vector, dest);
/*
* send the IPI.
*/
x2apic_icr_write(cfg, apicid);
}
static void x2apic_send_IPI_mask(cpumask_t mask, int vector)
{
unsigned long flags;
unsigned long query_cpu;
local_irq_save(flags);
for_each_cpu_mask(query_cpu, mask) {
__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
vector, APIC_DEST_PHYSICAL);
}
local_irq_restore(flags);
}
static void x2apic_send_IPI_allbutself(int vector)
{
cpumask_t mask = cpu_online_map;
cpu_clear(smp_processor_id(), mask);
if (!cpus_empty(mask))
x2apic_send_IPI_mask(mask, vector);
}
static void x2apic_send_IPI_all(int vector)
{
x2apic_send_IPI_mask(cpu_online_map, vector);
}
static int x2apic_apic_id_registered(void)
{
return 1;
}
static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
{
int cpu;
/*
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
cpu = first_cpu(cpumask);
if ((unsigned)cpu < NR_CPUS)
return per_cpu(x86_cpu_to_apicid, cpu);
else
return BAD_APICID;
}
static unsigned int get_apic_id(unsigned long x)
{
unsigned int id;
id = x;
return id;
}
static unsigned long set_apic_id(unsigned int id)
{
unsigned long x;
x = id;
return x;
}
static unsigned int x2apic_read_id(void)
{
return apic_read(APIC_ID);
}
static unsigned int phys_pkg_id(int index_msb)
{
return x2apic_read_id() >> index_msb;
}
void x2apic_send_IPI_self(int vector)
{
apic_write(APIC_SELF_IPI, vector);
}
void init_x2apic_ldr(void)
{
return;
}
struct genapic apic_x2apic_phys = {
.name = "physical x2apic",
.acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
.int_delivery_mode = dest_Fixed,
.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
.target_cpus = x2apic_target_cpus,
.vector_allocation_domain = x2apic_vector_allocation_domain,
.apic_id_registered = x2apic_apic_id_registered,
.init_apic_ldr = init_x2apic_ldr,
.send_IPI_all = x2apic_send_IPI_all,
.send_IPI_allbutself = x2apic_send_IPI_allbutself,
.send_IPI_mask = x2apic_send_IPI_mask,
.send_IPI_self = x2apic_send_IPI_self,
.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
.phys_pkg_id = phys_pkg_id,
.get_apic_id = get_apic_id,
.set_apic_id = set_apic_id,
.apic_id_mask = (0xFFFFFFFFu),
};
......@@ -12,12 +12,12 @@
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <asm/smp.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
......@@ -26,6 +26,36 @@
#include <asm/uv/uv_hub.h>
#include <asm/uv/bios.h>
DEFINE_PER_CPU(int, x2apic_extra_bits);
static enum uv_system_type uv_system_type;
static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
if (!strcmp(oem_id, "SGI")) {
if (!strcmp(oem_table_id, "UVL"))
uv_system_type = UV_LEGACY_APIC;
else if (!strcmp(oem_table_id, "UVX"))
uv_system_type = UV_X2APIC;
else if (!strcmp(oem_table_id, "UVH")) {
uv_system_type = UV_NON_UNIQUE_APIC;
return 1;
}
}
return 0;
}
enum uv_system_type get_uv_system_type(void)
{
return uv_system_type;
}
int is_uv_system(void)
{
return uv_system_type != UV_NONE;
}
EXPORT_SYMBOL_GPL(is_uv_system);
DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
......@@ -123,6 +153,10 @@ static int uv_apic_id_registered(void)
return 1;
}
static void uv_init_apic_ldr(void)
{
}
static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
{
int cpu;
......@@ -138,9 +172,34 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
return BAD_APICID;
}
static unsigned int get_apic_id(unsigned long x)
{
unsigned int id;
WARN_ON(preemptible() && num_online_cpus() > 1);
id = x | __get_cpu_var(x2apic_extra_bits);
return id;
}
static unsigned long set_apic_id(unsigned int id)
{
unsigned long x;
/* mask out x2apic_extra_bits? */
x = id;
return x;
}
static unsigned int uv_read_apic_id(void)
{
return get_apic_id(apic_read(APIC_ID));
}
static unsigned int phys_pkg_id(int index_msb)
{
return GET_APIC_ID(read_apic_id()) >> index_msb;
return uv_read_apic_id() >> index_msb;
}
#ifdef ZZZ /* Needs x2apic patch */
......@@ -152,17 +211,22 @@ static void uv_send_IPI_self(int vector)
struct genapic apic_x2apic_uv_x = {
.name = "UV large system",
.acpi_madt_oem_check = uv_acpi_madt_oem_check,
.int_delivery_mode = dest_Fixed,
.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
.target_cpus = uv_target_cpus,
.vector_allocation_domain = uv_vector_allocation_domain,/* Fixme ZZZ */
.apic_id_registered = uv_apic_id_registered,
.init_apic_ldr = uv_init_apic_ldr,
.send_IPI_all = uv_send_IPI_all,
.send_IPI_allbutself = uv_send_IPI_allbutself,
.send_IPI_mask = uv_send_IPI_mask,
/* ZZZ.send_IPI_self = uv_send_IPI_self, */
.cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
.phys_pkg_id = phys_pkg_id, /* Fixme ZZZ */
.get_apic_id = get_apic_id,
.set_apic_id = set_apic_id,
.apic_id_mask = (0xFFFFFFFFu),
};
static __cpuinit void set_x2apic_extra_bits(int pnode)
......@@ -401,3 +465,5 @@ void __cpuinit uv_cpu_init(void)
if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
set_x2apic_extra_bits(uv_hub_info->pnode);
}
......@@ -282,6 +282,30 @@ static int __init i8259A_init_sysfs(void)
device_initcall(i8259A_init_sysfs);
void mask_8259A(void)
{
unsigned long flags;
spin_lock_irqsave(&i8259A_lock, flags);
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
spin_unlock_irqrestore(&i8259A_lock, flags);
}
void unmask_8259A(void)
{
unsigned long flags;
spin_lock_irqsave(&i8259A_lock, flags);
outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
spin_unlock_irqrestore(&i8259A_lock, flags);
}
void init_8259A(int auto_eoi)
{
unsigned long flags;
......
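mask_8259A()/unmask_8259A() are exported so callers can quiesce the legacy PIC around a delivery-mode switch; enable_IR_x2apic() in this merge is one such caller. A sketch of the intended pattern (the middle step is illustrative):

```c
/* Sketch of the intended call pattern; the reconfiguration step in the
 * middle is illustrative, not a specific function from this merge. */
static void reconfigure_with_pic_quiesced(void)
{
	unsigned long flags;

	local_irq_save(flags);
	mask_8259A();		/* mask all of 8259A-1 and 8259A-2 */

	/* ... switch interrupt delivery mode here ... */

	unmask_8259A();		/* restore the cached IRQ masks */
	local_irq_restore(flags);
}
```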
......@@ -46,10 +46,13 @@
#include <asm/nmi.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <mach_apic.h>
#include <mach_apicdef.h>
#define __apicdebuginit(type) static type __init
int (*ioapic_renumber_irq)(int ioapic, int irq);
atomic_t irq_mis_count;
......@@ -1341,7 +1344,8 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
ioapic_write_entry(apic, pin, entry);
}
void __init print_IO_APIC(void)
__apicdebuginit(void) print_IO_APIC(void)
{
int apic, i;
union IO_APIC_reg_00 reg_00;
......@@ -1456,9 +1460,7 @@ void __init print_IO_APIC(void)
return;
}
#if 0
static void print_APIC_bitfield(int base)
__apicdebuginit(void) print_APIC_bitfield(int base)
{
unsigned int v;
int i, j;
......@@ -1479,9 +1481,10 @@ static void print_APIC_bitfield(int base)
}
}
void /*__init*/ print_local_APIC(void *dummy)
__apicdebuginit(void) print_local_APIC(void *dummy)
{
unsigned int v, ver, maxlvt;
u64 icr;
if (apic_verbosity == APIC_QUIET)
return;
......@@ -1490,7 +1493,7 @@ void /*__init*/ print_local_APIC(void *dummy)
smp_processor_id(), hard_smp_processor_id());
v = apic_read(APIC_ID);
printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v,
GET_APIC_ID(read_apic_id()));
GET_APIC_ID(v));
v = apic_read(APIC_LVR);
printk(KERN_INFO "... APIC VERSION: %08x\n", v);
ver = GET_APIC_VERSION(v);
......@@ -1532,10 +1535,9 @@ void /*__init*/ print_local_APIC(void *dummy)
printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
}
v = apic_read(APIC_ICR);
printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
v = apic_read(APIC_ICR2);
printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
icr = apic_icr_read();
printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
v = apic_read(APIC_LVTT);
printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
......@@ -1563,12 +1565,12 @@ void /*__init*/ print_local_APIC(void *dummy)
printk("\n");
}
void print_all_local_APICs(void)
__apicdebuginit(void) print_all_local_APICs(void)
{
on_each_cpu(print_local_APIC, NULL, 1);
}
void /*__init*/ print_PIC(void)
__apicdebuginit(void) print_PIC(void)
{
unsigned int v;
unsigned long flags;
......@@ -1600,7 +1602,17 @@ void /*__init*/ print_PIC(void)
printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}
#endif /* 0 */
__apicdebuginit(int) print_all_ICs(void)
{
print_PIC();
print_all_local_APICs();
print_IO_APIC();
return 0;
}
fs_initcall(print_all_ICs);
static void __init enable_IO_APIC(void)
{
......@@ -1698,8 +1710,7 @@ void disable_IO_APIC(void)
entry.dest_mode = 0; /* Physical */
entry.delivery_mode = dest_ExtINT; /* ExtInt */
entry.vector = 0;
entry.dest.physical.physical_dest =
GET_APIC_ID(read_apic_id());
entry.dest.physical.physical_dest = read_apic_id();
/*
* Add it to the IO-APIC irq-routing table:
......@@ -1725,10 +1736,8 @@ static void __init setup_ioapic_ids_from_mpc(void)
unsigned char old_id;
unsigned long flags;
#ifdef CONFIG_X86_NUMAQ
if (found_numaq)
if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
return;
#endif
/*
* Don't check I/O APIC IDs for xAPIC systems. They have
......@@ -2329,8 +2338,6 @@ void __init setup_IO_APIC(void)
setup_IO_APIC_irqs();
init_IO_APIC_traps();
check_timer();
if (!acpi_ioapic)
print_IO_APIC();
}
/*
......
......@@ -74,6 +74,15 @@ void __init init_ISA_irqs (void)
}
}
/*
* IRQ2 is cascade interrupt to second interrupt controller
*/
static struct irqaction irq2 = {
.handler = no_action,
.mask = CPU_MASK_NONE,
.name = "cascade",
};
/* Overridden in paravirt.c */
void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
......@@ -98,6 +107,46 @@ void __init native_init_IRQ(void)
set_intr_gate(vector, interrupt[i]);
}
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP)
/*
* IRQ0 must be given a fixed assignment and initialized,
* because it's used before the IO-APIC is set up.
*/
set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
/*
* The reschedule interrupt is a CPU-to-CPU reschedule-helper
* IPI, driven by wakeup.
*/
alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
/* IPI for invalidation */
alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
/* IPI for generic function call */
alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
/* IPI for single call function */
set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
/* self generated IPI for local APIC timer */
alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
/* IPI vectors for APIC spurious and error interrupts */
alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL)
/* thermal monitor LVT interrupt */
alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
if (!acpi_ioapic)
setup_irq(2, &irq2);
/* setup after call gates are initialised (usually add in
* the architecture specific gates)
*/
......
......@@ -397,7 +397,9 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
generic_bigsmp_probe();
#endif
#ifdef CONFIG_X86_32
setup_apic_routing();
#endif
if (!num_processors)
printk(KERN_ERR "MPTABLE: no processors registered!\n");
return num_processors;
......
......@@ -229,6 +229,12 @@ static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable,
}
}
static int __init numaq_setup_ioapic_ids(void)
{
/* so can skip it */
return 1;
}
static struct x86_quirks numaq_x86_quirks __initdata = {
.arch_pre_time_init = numaq_pre_time_init,
.arch_time_init = NULL,
......@@ -243,6 +249,7 @@ static struct x86_quirks numaq_x86_quirks __initdata = {
.mpc_oem_bus_info = mpc_oem_bus_info,
.mpc_oem_pci_bus = mpc_oem_pci_bus,
.smp_read_mpc_oem = smp_read_mpc_oem,
.setup_ioapic_ids = numaq_setup_ioapic_ids,
};
void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
......
......@@ -374,8 +374,6 @@ struct pv_cpu_ops pv_cpu_ops = {
struct pv_apic_ops pv_apic_ops = {
#ifdef CONFIG_X86_LOCAL_APIC
.apic_write = native_apic_write,
.apic_read = native_apic_read,
.setup_boot_clock = setup_boot_APIC_clock,
.setup_secondary_clock = setup_secondary_APIC_clock,
.startup_ipi_hook = paravirt_nop,
......
......@@ -758,6 +758,8 @@ void __init setup_arch(char **cmdline_p)
#else
num_physpages = max_pfn;
if (cpu_has_x2apic)
check_x2apic();
/* How many end-of-memory variables you have, grandma! */
/* need this before calling reserve_initrd */
......
......@@ -123,7 +123,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
static atomic_t init_deasserted;
static int boot_cpu_logical_apicid;
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
......@@ -165,6 +164,8 @@ static void unmap_cpu_to_node(int cpu)
#endif
#ifdef CONFIG_X86_32
static int boot_cpu_logical_apicid;
u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
{ [0 ... NR_CPUS-1] = BAD_APICID };
......@@ -210,7 +211,7 @@ static void __cpuinit smp_callin(void)
/*
* (This works even if the APIC is not enabled.)
*/
phys_id = GET_APIC_ID(read_apic_id());
phys_id = read_apic_id();
cpuid = smp_processor_id();
if (cpu_isset(cpuid, cpu_callin_map)) {
panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
......@@ -550,8 +551,7 @@ static inline void __inquire_remote_apic(int apicid)
printk(KERN_CONT
"a previous APIC delivery may have failed\n");
apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);
apic_icr_write(APIC_DM_REMRD | regs[i], apicid);
timeout = 0;
do {
......@@ -583,11 +583,9 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
int maxlvt;
/* Target chip */
apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));
/* Boot on the stack */
/* Kick the second */
apic_write(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);
apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid);
pr_debug("Waiting for send to finish...\n");
send_status = safe_apic_wait_icr_idle();
......@@ -640,13 +638,11 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
/*
* Turn INIT on target chip
*/
apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
/*
* Send IPI
*/
apic_write(APIC_ICR,
APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT);
apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
phys_apicid);
pr_debug("Waiting for send to finish...\n");
send_status = safe_apic_wait_icr_idle();
......@@ -656,10 +652,8 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
pr_debug("Deasserting INIT.\n");
/* Target chip */
apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
/* Send IPI */
apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
pr_debug("Waiting for send to finish...\n");
send_status = safe_apic_wait_icr_idle();
......@@ -702,11 +696,10 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
*/
/* Target chip */
apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
/* Boot on the stack */
/* Kick the second */
apic_write(APIC_ICR, APIC_DM_STARTUP | (start_eip >> 12));
apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
phys_apicid);
/*
* Give the other CPU some time to accept the IPI.
......@@ -1175,10 +1168,17 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
* Setup boot CPU information
*/
smp_store_cpu_info(0); /* Final full version of the data */
#ifdef CONFIG_X86_32
boot_cpu_logical_apicid = logical_smp_processor_id();
#endif
current_thread_info()->cpu = 0; /* needed? */
set_cpu_sibling_map(0);
#ifdef CONFIG_X86_64
enable_IR_x2apic();
setup_apic_routing();
#endif
if (smp_sanity_check(max_cpus) < 0) {
printk(KERN_INFO "SMP disabled\n");
disable_smp();
......@@ -1186,9 +1186,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
}
preempt_disable();
if (GET_APIC_ID(read_apic_id()) != boot_cpu_physical_apicid) {
if (read_apic_id() != boot_cpu_physical_apicid) {
panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
GET_APIC_ID(read_apic_id()), boot_cpu_physical_apicid);
read_apic_id(), boot_cpu_physical_apicid);
/* Or can we switch back to PIC here? */
}
preempt_enable();
......
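The hunks above convert the INIT/STARTUP wakeup to the new apic_icr_write() interface; condensed, the protocol looks like this (sketch; the delays, send-status checks, and APIC-version handling of the real wakeup_secondary_cpu() are omitted):

```c
/* Condensed sketch of the wakeup sequence using apic_icr_write();
 * the real code interleaves udelays and send/accept-status checks. */
static void wakeup_cpu_sketch(int phys_apicid, unsigned long start_eip)
{
	/* Assert INIT on the target */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
		       phys_apicid);
	safe_apic_wait_icr_idle();

	/* Deassert INIT */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
	safe_apic_wait_icr_idle();

	/* STARTUP IPI: the vector encodes the 4K-aligned trampoline page */
	apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12), phys_apicid);
	safe_apic_wait_icr_idle();
}
```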
......@@ -30,7 +30,7 @@
#include <linux/init.h>
#include <asm/io.h>
#include <asm/bios_ebda.h>
#include <asm/mach-summit/mach_mpparse.h>
#include <asm/summit/mpparse.h>
static struct rio_table_hdr *rio_table_hdr __initdata;
static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata;
......
......@@ -905,8 +905,8 @@ static inline int __init activate_vmi(void)
#endif
#ifdef CONFIG_X86_LOCAL_APIC
para_fill(pv_apic_ops.apic_read, APICRead);
para_fill(pv_apic_ops.apic_write, APICWrite);
para_fill(apic_ops->read, APICRead);
para_fill(apic_ops->write, APICWrite);
#endif
/*
......
......@@ -55,6 +55,7 @@
#include <linux/lguest_launcher.h>
#include <linux/virtio_console.h>
#include <linux/pm.h>
#include <asm/apic.h>
#include <asm/lguest.h>
#include <asm/paravirt.h>
#include <asm/param.h>
......@@ -783,14 +784,44 @@ static void lguest_wbinvd(void)
* code qualifies for Advanced. It will also never interrupt anything. It
* does, however, allow us to get through the Linux boot code. */
#ifdef CONFIG_X86_LOCAL_APIC
static void lguest_apic_write(unsigned long reg, u32 v)
static void lguest_apic_write(u32 reg, u32 v)
{
}
static u32 lguest_apic_read(unsigned long reg)
static u32 lguest_apic_read(u32 reg)
{
return 0;
}
static u64 lguest_apic_icr_read(void)
{
return 0;
}
static void lguest_apic_icr_write(u32 low, u32 id)
{
/* Warn to see if there are any stray references */
WARN_ON(1);
}
static void lguest_apic_wait_icr_idle(void)
{
return;
}
static u32 lguest_apic_safe_wait_icr_idle(void)
{
return 0;
}
static struct apic_ops lguest_basic_apic_ops = {
.read = lguest_apic_read,
.write = lguest_apic_write,
.icr_read = lguest_apic_icr_read,
.icr_write = lguest_apic_icr_write,
.wait_icr_idle = lguest_apic_wait_icr_idle,
.safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle,
};
#endif
/* STOP! Until an interrupt comes in. */
......@@ -990,8 +1021,7 @@ __init void lguest_init(void)
#ifdef CONFIG_X86_LOCAL_APIC
/* apic read/write intercepts */
pv_apic_ops.apic_write = lguest_apic_write;
pv_apic_ops.apic_read = lguest_apic_read;
apic_ops = &lguest_basic_apic_ops;
#endif
/* time operations */
......
......@@ -38,15 +38,6 @@ void __init pre_intr_init_hook(void)
init_ISA_irqs();
}
/*
* IRQ2 is cascade interrupt to second interrupt controller
*/
static struct irqaction irq2 = {
.handler = no_action,
.mask = CPU_MASK_NONE,
.name = "cascade",
};
/**
* intr_init_hook - post gate setup interrupt initialisation
*
......@@ -62,12 +53,6 @@ void __init intr_init_hook(void)
if (x86_quirks->arch_intr_init())
return;
}
#ifdef CONFIG_X86_LOCAL_APIC
apic_intr_init();
#endif
if (!acpi_ioapic)
setup_irq(2, &irq2);
}
/**
......
......@@ -9,4 +9,4 @@ obj-$(CONFIG_X86_NUMAQ) += numaq.o
obj-$(CONFIG_X86_SUMMIT) += summit.o
obj-$(CONFIG_X86_BIGSMP) += bigsmp.o
obj-$(CONFIG_X86_ES7000) += es7000.o
obj-$(CONFIG_X86_ES7000) += ../../x86/mach-es7000/
obj-$(CONFIG_X86_ES7000) += ../../x86/es7000/
......@@ -5,18 +5,17 @@
#define APIC_DEFINITION 1
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <asm/smp.h>
#include <asm/mpspec.h>
#include <asm/genapic.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <asm/mach-bigsmp/mach_apic.h>
#include <asm/mach-bigsmp/mach_apicdef.h>
#include <asm/mach-bigsmp/mach_ipi.h>
#include <asm/bigsmp/apicdef.h>
#include <linux/smp.h>
#include <asm/bigsmp/apic.h>
#include <asm/bigsmp/ipi.h>
#include <asm/mach-default/mach_mpparse.h>
static int dmi_bigsmp; /* can be set by dmi scanners */
......
......@@ -4,20 +4,19 @@
#define APIC_DEFINITION 1
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <asm/smp.h>
#include <asm/mpspec.h>
#include <asm/genapic.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <asm/mach-es7000/mach_apicdef.h>
#include <asm/mach-es7000/mach_apic.h>
#include <asm/mach-es7000/mach_ipi.h>
#include <asm/mach-es7000/mach_mpparse.h>
#include <asm/mach-es7000/mach_wakecpu.h>
#include <asm/es7000/apicdef.h>
#include <linux/smp.h>
#include <asm/es7000/apic.h>
#include <asm/es7000/ipi.h>
#include <asm/es7000/mpparse.h>
#include <asm/es7000/wakecpu.h>
static int probe_es7000(void)
{
......
......@@ -4,7 +4,6 @@
#define APIC_DEFINITION 1
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <asm/mpspec.h>
#include <asm/genapic.h>
#include <asm/fixmap.h>
......@@ -12,11 +11,12 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <asm/mach-numaq/mach_apic.h>
#include <asm/mach-numaq/mach_apicdef.h>
#include <asm/mach-numaq/mach_ipi.h>
#include <asm/mach-numaq/mach_mpparse.h>
#include <asm/mach-numaq/mach_wakecpu.h>
#include <asm/numaq/apicdef.h>
#include <linux/smp.h>
#include <asm/numaq/apic.h>
#include <asm/numaq/ipi.h>
#include <asm/numaq/mpparse.h>
#include <asm/numaq/wakecpu.h>
#include <asm/numaq.h>
static int mps_oem_check(struct mp_config_table *mpc, char *oem,
......
......@@ -4,19 +4,18 @@
#define APIC_DEFINITION 1
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <asm/smp.h>
#include <asm/mpspec.h>
#include <asm/genapic.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <asm/mach-summit/mach_apic.h>
#include <asm/mach-summit/mach_apicdef.h>
#include <asm/mach-summit/mach_ipi.h>
#include <asm/mach-summit/mach_mpparse.h>
#include <asm/summit/apicdef.h>
#include <linux/smp.h>
#include <asm/summit/apic.h>
#include <asm/summit/ipi.h>
#include <asm/summit/mpparse.h>
static int probe_summit(void)
{
......
......@@ -250,10 +250,5 @@ int __init pci_acpi_init(void)
acpi_pci_irq_enable(dev);
}
#ifdef CONFIG_X86_IO_APIC
if (acpi_ioapic)
print_IO_APIC();
#endif
return 0;
}
......@@ -36,6 +36,7 @@
#include <xen/hvc-console.h>
#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
......@@ -580,16 +581,47 @@ static void xen_io_delay(void)
}
#ifdef CONFIG_X86_LOCAL_APIC
static u32 xen_apic_read(unsigned long reg)
static u32 xen_apic_read(u32 reg)
{
return 0;
}
static void xen_apic_write(unsigned long reg, u32 val)
static void xen_apic_write(u32 reg, u32 val)
{
/* Warn to see if there are any stray references */
WARN_ON(1);
}
static u64 xen_apic_icr_read(void)
{
return 0;
}
static void xen_apic_icr_write(u32 low, u32 id)
{
/* Warn to see if there are any stray references */
WARN_ON(1);
}
static void xen_apic_wait_icr_idle(void)
{
return;
}
static u32 xen_safe_apic_wait_icr_idle(void)
{
return 0;
}
static struct apic_ops xen_basic_apic_ops = {
.read = xen_apic_read,
.write = xen_apic_write,
.icr_read = xen_apic_icr_read,
.icr_write = xen_apic_icr_write,
.wait_icr_idle = xen_apic_wait_icr_idle,
.safe_wait_icr_idle = xen_safe_apic_wait_icr_idle,
};
#endif
static void xen_flush_tlb(void)
......@@ -1273,8 +1305,6 @@ static const struct pv_irq_ops xen_irq_ops __initdata = {
static const struct pv_apic_ops xen_apic_ops __initdata = {
#ifdef CONFIG_X86_LOCAL_APIC
.apic_write = xen_apic_write,
.apic_read = xen_apic_read,
.setup_boot_clock = paravirt_nop,
.setup_secondary_clock = paravirt_nop,
.startup_ipi_hook = paravirt_nop,
......@@ -1677,6 +1707,13 @@ asmlinkage void __init xen_start_kernel(void)
pv_apic_ops = xen_apic_ops;
pv_mmu_ops = xen_mmu_ops;
#ifdef CONFIG_X86_LOCAL_APIC
/*
* set up the basic apic ops.
*/
apic_ops = &xen_basic_apic_ops;
#endif
if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
......
......@@ -26,6 +26,8 @@ obj-$(CONFIG_HT_IRQ) += htirq.o
# Build Intel IOMMU support
obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
#
# Some architectures use the generic PCI setup functions
#
......
#ifndef _DMA_REMAPPING_H
#define _DMA_REMAPPING_H
/*
* We need a fixed PAGE_SIZE of 4K irrespective of
* arch PAGE_SIZE for IOMMU page tables.
*/
#define PAGE_SHIFT_4K (12)
#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K)
#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K)
#define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K)
#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
/*
* 0: Present
* 1-11: Reserved
* 12-63: Context Ptr (12 - (haw-1))
* 64-127: Reserved
*/
struct root_entry {
u64 val;
u64 rsvd1;
};
#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
root->val |= value & PAGE_MASK_4K;
}
struct context_entry;
static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
return (struct context_entry *)
(root_present(root)?phys_to_virt(
root->val & PAGE_MASK_4K):
NULL);
}
/*
* low 64 bits:
* 0: present
* 1: fault processing disable
* 2-3: translation type
* 12-63: address space root
* high 64 bits:
* 0-2: address width
* 3-6: aval
* 8-23: domain id
*/
struct context_entry {
u64 lo;
u64 hi;
};
#define context_present(c) ((c).lo & 1)
#define context_fault_disable(c) (((c).lo >> 1) & 1)
#define context_translation_type(c) (((c).lo >> 2) & 3)
#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
#define context_address_width(c) ((c).hi & 7)
#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
#define context_set_present(c) do {(c).lo |= 1;} while (0)
#define context_set_fault_enable(c) \
do {(c).lo &= (((u64)-1) << 2) | 1;} while (0)
#define context_set_translation_type(c, val) \
do { \
(c).lo &= (((u64)-1) << 4) | 3; \
(c).lo |= ((val) & 3) << 2; \
} while (0)
#define CONTEXT_TT_MULTI_LEVEL 0
#define context_set_address_root(c, val) \
do {(c).lo |= (val) & PAGE_MASK_4K;} while (0)
#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
#define context_set_domain_id(c, val) \
do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0)
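A minimal sketch of how these macros compose a context entry for multi-level translation; the domain id, address width, and page-table root here are illustrative arguments, not values from this patch:

```c
/* Sketch: building a context entry with the macros above.
 * 'pgd_phys', 'did' and 'agaw' are illustrative. */
static void context_entry_example(struct context_entry *ce,
				  u64 pgd_phys, int did, int agaw)
{
	context_clear_entry(*ce);
	context_set_domain_id(*ce, did);
	context_set_address_width(*ce, agaw);
	context_set_address_root(*ce, pgd_phys & PAGE_MASK_4K);
	context_set_translation_type(*ce, CONTEXT_TT_MULTI_LEVEL);
	context_set_fault_enable(*ce);
	context_set_present(*ce);
}
```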
/*
* 0: readable
* 1: writable
* 2-6: reserved
* 7: super page
* 8-11: available
* 12-63: Host physical address
*/
struct dma_pte {
u64 val;
};
#define dma_clear_pte(p) do {(p).val = 0;} while (0)
#define DMA_PTE_READ (1)
#define DMA_PTE_WRITE (2)
#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0)
#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
#define dma_set_pte_prot(p, prot) \
do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
#define dma_set_pte_addr(p, addr) do {\
(p).val |= ((addr) & PAGE_MASK_4K); } while (0)
#define dma_pte_present(p) (((p).val & 3) != 0)
struct intel_iommu;
struct dmar_domain {
int id; /* domain id */
struct intel_iommu *iommu; /* back pointer to owning iommu */
struct list_head devices; /* all devices' list */
struct iova_domain iovad; /* iova's that belong to this domain */
struct dma_pte *pgd; /* virtual address */
spinlock_t mapping_lock; /* page table lock */
int gaw; /* max guest address width */
/* adjusted guest address width, 0 is level 2 30-bit */
int agaw;
#define DOMAIN_FLAG_MULTIPLE_DEVICES 1
int flags;
};
/* PCI domain-device relationship */
struct device_domain_info {
struct list_head link; /* link to domain siblings */
struct list_head global; /* link to global list */
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
struct dmar_domain *domain; /* pointer to domain */
};
extern int init_dmars(void);
extern void free_dmar_iommu(struct intel_iommu *iommu);
extern int dmar_disabled;
#ifndef CONFIG_DMAR_GFX_WA
static inline void iommu_prepare_gfx_mapping(void)
{
return;
}
#endif /* !CONFIG_DMAR_GFX_WA */
#endif
......@@ -49,8 +49,6 @@
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */
#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
......@@ -58,8 +56,6 @@ static void flush_unmaps_timeout(unsigned long data);
DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
static struct intel_iommu *g_iommus;
#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
int next;
......@@ -185,13 +181,6 @@ void free_iova_mem(struct iova *iova)
kmem_cache_free(iommu_iova_cache, iova);
}
static inline void __iommu_flush_cache(
struct intel_iommu *iommu, void *addr, int size)
{
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(addr, size);
}
/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
u8 bus, u8 devfn)
......@@ -488,19 +477,6 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
return 0;
}
#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
{\
cycles_t start_time = get_cycles();\
while (1) {\
sts = op (iommu->reg + offset);\
if (cond)\
break;\
if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
panic("DMAR hardware is malfunctioning\n");\
cpu_relax();\
}\
}
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
void *addr;
......@@ -990,6 +966,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
return -ENOMEM;
}
spin_lock_init(&iommu->lock);
/*
* if Caching mode is set, then invalid translations are tagged
* with domainid 0. Hence we need to pre-allocate it.
......@@ -998,62 +976,15 @@ static int iommu_init_domains(struct intel_iommu *iommu)
set_bit(0, iommu->domain_ids);
return 0;
}
static struct intel_iommu *alloc_iommu(struct intel_iommu *iommu,
struct dmar_drhd_unit *drhd)
{
int ret;
int map_size;
u32 ver;
iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
if (!iommu->reg) {
printk(KERN_ERR "IOMMU: can't map the region\n");
goto error;
}
iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
/* the registers might be more than one page */
map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
cap_max_fault_reg_offset(iommu->cap));
map_size = PAGE_ALIGN_4K(map_size);
if (map_size > PAGE_SIZE_4K) {
iounmap(iommu->reg);
iommu->reg = ioremap(drhd->reg_base_addr, map_size);
if (!iommu->reg) {
printk(KERN_ERR "IOMMU: can't map the region\n");
goto error;
}
}
ver = readl(iommu->reg + DMAR_VER_REG);
pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
iommu->cap, iommu->ecap);
ret = iommu_init_domains(iommu);
if (ret)
goto error_unmap;
spin_lock_init(&iommu->lock);
spin_lock_init(&iommu->register_lock);
drhd->iommu = iommu;
return iommu;
error_unmap:
iounmap(iommu->reg);
error:
kfree(iommu);
return NULL;
}
static void domain_exit(struct dmar_domain *domain);
static void free_iommu(struct intel_iommu *iommu)
void free_dmar_iommu(struct intel_iommu *iommu)
{
struct dmar_domain *domain;
int i;
if (!iommu)
return;
i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
for (; i < cap_ndoms(iommu->cap); ) {
domain = iommu->domains[i];
......@@ -1078,10 +1009,6 @@ static void free_iommu(struct intel_iommu *iommu)
/* free context mapping */
free_context_table(iommu);
if (iommu->reg)
iounmap(iommu->reg);
kfree(iommu);
}
static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
......@@ -1426,37 +1353,6 @@ find_domain(struct pci_dev *pdev)
return NULL;
}
static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
struct pci_dev *dev)
{
int index;
while (dev) {
for (index = 0; index < cnt; index++)
if (dev == devices[index])
return 1;
/* Check our parent */
dev = dev->bus->self;
}
return 0;
}
static struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
struct dmar_drhd_unit *drhd = NULL;
list_for_each_entry(drhd, &dmar_drhd_units, list) {
if (drhd->include_all || dmar_pci_device_match(drhd->devices,
drhd->devices_cnt, dev))
return drhd;
}
return NULL;
}
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
......@@ -1729,8 +1625,6 @@ int __init init_dmars(void)
* endfor
*/
for_each_drhd_unit(drhd) {
if (drhd->ignored)
continue;
g_num_of_iommus++;
/*
* lock not needed as this is only incremented in the single
......@@ -1739,12 +1633,6 @@ int __init init_dmars(void)
*/
}
g_iommus = kzalloc(g_num_of_iommus * sizeof(*iommu), GFP_KERNEL);
if (!g_iommus) {
ret = -ENOMEM;
goto error;
}
deferred_flush = kzalloc(g_num_of_iommus *
sizeof(struct deferred_flush_tables), GFP_KERNEL);
if (!deferred_flush) {
......@@ -1752,16 +1640,15 @@ int __init init_dmars(void)
goto error;
}
i = 0;
for_each_drhd_unit(drhd) {
if (drhd->ignored)
continue;
iommu = alloc_iommu(&g_iommus[i], drhd);
i++;
if (!iommu) {
ret = -ENOMEM;
iommu = drhd->iommu;
ret = iommu_init_domains(iommu);
if (ret)
goto error;
}
/*
* TBD:
......@@ -1845,7 +1732,6 @@ int __init init_dmars(void)
iommu = drhd->iommu;
free_iommu(iommu);
}
kfree(g_iommus);
return ret;
}
......@@ -2002,7 +1888,10 @@ static void flush_unmaps(void)
/* just flush them all */
for (i = 0; i < g_num_of_iommus; i++) {
if (deferred_flush[i].next) {
iommu_flush_iotlb_global(&g_iommus[i], 0);
struct intel_iommu *iommu =
deferred_flush[i].domain[0]->iommu;
iommu_flush_iotlb_global(iommu, 0);
for (j = 0; j < deferred_flush[i].next; j++) {
__free_iova(&deferred_flush[i].domain[j]->iovad,
deferred_flush[i].iova[j]);
......@@ -2032,7 +1921,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
if (list_size == HIGH_WATER_MARK)
flush_unmaps();
iommu_id = dom->iommu - g_iommus;
iommu_id = dom->iommu->seq_id;
next = deferred_flush[iommu_id].next;
deferred_flush[iommu_id].domain[next] = dom;
deferred_flush[iommu_id].iova[next] = iova;
......@@ -2348,38 +2238,6 @@ static void __init iommu_exit_mempool(void)
}
static int blacklist_iommu(const struct dmi_system_id *id)
{
printk(KERN_INFO "%s detected; disabling IOMMU\n",
id->ident);
dmar_disabled = 1;
return 0;
}
static struct dmi_system_id __initdata intel_iommu_dmi_table[] = {
{ /* Some DG33BU BIOS revisions advertised non-existent VT-d */
.callback = blacklist_iommu,
.ident = "Intel DG33BU",
{ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "DG33BU"),
}
},
{ }
};
void __init detect_intel_iommu(void)
{
if (swiotlb || no_iommu || iommu_detected || dmar_disabled)
return;
if (early_dmar_detect()) {
dmi_check_system(intel_iommu_dmi_table);
if (dmar_disabled)
return;
iommu_detected = 1;
}
}
static void __init init_no_remapping_devices(void)
{
struct dmar_drhd_unit *drhd;
......@@ -2426,10 +2284,17 @@ int __init intel_iommu_init(void)
{
int ret = 0;
if (no_iommu || swiotlb || dmar_disabled)
if (dmar_table_init())
return -ENODEV;
if (dmar_table_init())
if (dmar_dev_scope_init())
return -ENODEV;
/*
* Check the need for DMA-remapping initialization now.
* Above initialization will also be used by Interrupt-remapping.
*/
if (no_iommu || swiotlb || dmar_disabled)
return -ENODEV;
iommu_init_mempool();
......
#include "intel-iommu.h"
struct ioapic_scope {
struct intel_iommu *iommu;
unsigned int id;
};
#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
......@@ -105,6 +105,7 @@
#define APIC_TMICT 0x380
#define APIC_TMCCT 0x390
#define APIC_TDCR 0x3E0
#define APIC_SELF_IPI 0x3F0
#define APIC_TDR_DIV_TMBASE (1 << 2)
#define APIC_TDR_DIV_1 0xB
#define APIC_TDR_DIV_2 0x0
......@@ -128,6 +129,8 @@
#define APIC_EILVT3 0x530
#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
#define APIC_BASE_MSR 0x800
#define X2APIC_ENABLE (1UL << 10)
#ifdef CONFIG_X86_32
# define MAX_IO_APICS 64
......
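These defines pair up: in x2apic mode every xAPIC MMIO offset maps to an MSR at APIC_BASE_MSR + (offset >> 4), so APIC_SELF_IPI (0x3F0) becomes MSR 0x83F. A sketch of the access primitives under that architectural assumption:

```c
/* Sketch: x2apic register access via MSRs at APIC_BASE_MSR + (reg >> 4).
 * The names are illustrative; the mapping itself is architectural. */
static u32 x2apic_reg_read(u32 reg)
{
	u32 low, high;

	rdmsr(APIC_BASE_MSR + (reg >> 4), low, high);
	return low;
}

static void x2apic_reg_write(u32 reg, u32 v)
{
	wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0);
}
```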
......@@ -12,8 +12,6 @@
/* these aren't arch hooks, they are generic routines
* that can be used by the hooks */
extern void init_ISA_irqs(void);
extern void apic_intr_init(void);
extern void smp_intr_init(void);
extern irqreturn_t timer_interrupt(int irq, void *dev_id);
/* these are the defined hooks */
......
#ifndef ASM_X86__MACH_BIGSMP__MACH_APIC_H
#define ASM_X86__MACH_BIGSMP__MACH_APIC_H
#ifndef __ASM_MACH_APIC_H
#define __ASM_MACH_APIC_H
#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
#define esr_disable (1)
......@@ -141,4 +141,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
return cpuid_apic >> index_msb;
}
#endif /* ASM_X86__MACH_BIGSMP__MACH_APIC_H */
#endif /* __ASM_MACH_APIC_H */
#ifndef ASM_X86__MACH_BIGSMP__MACH_APICDEF_H
#define ASM_X86__MACH_BIGSMP__MACH_APICDEF_H
#ifndef __ASM_MACH_APICDEF_H
#define __ASM_MACH_APICDEF_H
#define APIC_ID_MASK (0xFF<<24)
......@@ -10,4 +10,4 @@ static inline unsigned get_apic_id(unsigned long x)
#define GET_APIC_ID(x) get_apic_id(x)
#endif /* ASM_X86__MACH_BIGSMP__MACH_APICDEF_H */
#endif
#ifndef ASM_X86__MACH_BIGSMP__MACH_IPI_H
#define ASM_X86__MACH_BIGSMP__MACH_IPI_H
#ifndef __ASM_MACH_IPI_H
#define __ASM_MACH_IPI_H
void send_IPI_mask_sequence(cpumask_t mask, int vector);
......@@ -22,4 +22,4 @@ static inline void send_IPI_all(int vector)
send_IPI_mask(cpu_online_map, vector);
}
#endif /* ASM_X86__MACH_BIGSMP__MACH_IPI_H */
#endif /* __ASM_MACH_IPI_H */
......@@ -93,6 +93,7 @@
#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */
#define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */
#define X86_FEATURE_XMM4_2 (4*32+20) /* Streaming SIMD Extensions-4.2 */
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
......@@ -192,6 +193,7 @@ extern const char * const x86_power_flags[32];
#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT)
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
......
......@@ -57,4 +57,7 @@ static inline void outb_pic(unsigned char value, unsigned int port)
extern struct irq_chip i8259A_chip;
extern void mask_8259A(void);
extern void unmask_8259A(void);
#endif /* ASM_X86__I8259_H */