Commit 8461689c authored by Ingo Molnar

Merge branch 'x86/apic' into x86/platform

Merge in x86/apic to resolve a semantic merge conflict caused by the vector_allocation_domain() API change.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents d48daf37 7eb9ae07
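The conflict is semantic rather than textual: x86/apic reshaped two struct apic callbacks, as the struct apic hunks below show. vector_allocation_domain() now returns bool, and the single-mask cpu_mask_to_apicid() is gone entirely, folded into a cpu_mask_to_apicid_and() that reports errors through its return value and hands the APIC ID back through an out-parameter:

    /* before */
    void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
    unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
    unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
                                           const struct cpumask *andmask);

    /* after */
    bool (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
    int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
                                  const struct cpumask *andmask,
                                  unsigned int *apicid);

Every apic driver in the diff is converted to this convention, so code merged in from x86/platform has to follow it as well.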
@@ -306,7 +306,7 @@ struct apic {
 	unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
 	unsigned long (*check_apicid_present)(int apicid);

-	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
+	bool (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 	void (*init_apic_ldr)(void);

 	void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
@@ -331,9 +331,9 @@ struct apic {
 	unsigned long (*set_apic_id)(unsigned int id);
 	unsigned long apic_id_mask;

-	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
-	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
-					       const struct cpumask *andmask);
+	int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+				      const struct cpumask *andmask,
+				      unsigned int *apicid);

 	/* ipi */
 	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
@@ -537,6 +537,11 @@ static inline const struct cpumask *default_target_cpus(void)
 #endif
 }

+static inline const struct cpumask *online_target_cpus(void)
+{
+	return cpu_online_mask;
+}
+
 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
@@ -586,21 +591,50 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
 #endif

-static inline unsigned int
-default_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static inline int
+flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+			    const struct cpumask *andmask,
+			    unsigned int *apicid)
 {
-	return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
+	unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
+				 cpumask_bits(andmask)[0] &
+				 cpumask_bits(cpu_online_mask)[0] &
+				 APIC_ALL_CPUS;
+
+	if (likely(cpu_mask)) {
+		*apicid = (unsigned int)cpu_mask;
+		return 0;
+	} else {
+		return -EINVAL;
+	}
 }

-static inline unsigned int
-default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			       const struct cpumask *andmask)
+extern int
+default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+			       const struct cpumask *andmask,
+			       unsigned int *apicid);
+
+static inline bool
+flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	unsigned long mask1 = cpumask_bits(cpumask)[0];
-	unsigned long mask2 = cpumask_bits(andmask)[0];
-	unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
+	/* Careful. Some cpus do not strictly honor the set of cpus
+	 * specified in the interrupt destination when using lowest
+	 * priority interrupt delivery mode.
+	 *
+	 * In particular there was a hyperthreading cpu observed to
+	 * deliver interrupts to the wrong hyperthread when only one
+	 * hyperthread was specified in the interrupt destination.
+	 */
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
+	return false;
+}

-	return (unsigned int)(mask1 & mask2 & mask3);
+static inline bool
+default_vector_allocation_domain(int cpu, struct cpumask *retmask)
+{
+	cpumask_copy(retmask, cpumask_of(cpu));
+	return true;
 }

 static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
...
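With the new convention, callers no longer compare the result against the BAD_APICID sentinel; they check the return code before touching the out-parameter. A minimal user-space sketch of the pattern (illustrative names, not kernel code):

    #include <errno.h>
    #include <stdio.h>

    /* Store the result via the out-parameter and return 0, or return a
     * negative errno when the mask is empty -- mirroring the -EINVAL
     * path of flat_cpu_mask_to_apicid_and() above. */
    static int mask_to_apicid(unsigned long cpu_mask, unsigned int *apicid)
    {
            if (!cpu_mask)
                    return -EINVAL;
            *apicid = (unsigned int)cpu_mask;
            return 0;
    }

    int main(void)
    {
            unsigned int dest;
            int err = mask_to_apicid(0x0c, &dest);  /* CPUs 2 and 3 */

            if (err)
                    fprintf(stderr, "no destination: %d\n", err);
            else
                    printf("logical APIC mask: %#x\n", dest);
            return 0;
    }

Note also the bool returned by the *_vector_allocation_domain() helpers above: judging by the implementations in this diff, domains that ignore the cpu argument (the flat variant) return false, while per-cpu domains return true, apparently letting a caller stop probing further CPUs once it knows the domain cannot change.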
@@ -9,15 +9,6 @@
 #include <asm/ipi.h>
 #include <linux/cpumask.h>

-/*
- * Need to use more than cpu 0, because we need more vectors
- * when MSI-X are used.
- */
-static const struct cpumask *x2apic_target_cpus(void)
-{
-	return cpu_online_mask;
-}
-
 static int x2apic_apic_id_valid(int apicid)
 {
 	return 1;
@@ -28,15 +19,6 @@ static int x2apic_apic_id_registered(void)
 	return 1;
 }

-/*
- * For now each logical cpu is in its own vector allocation domain.
- */
-static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	cpumask_clear(retmask);
-	cpumask_set_cpu(cpu, retmask);
-}
-
 static void
 __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
 {
...
@@ -156,7 +156,6 @@ struct x86_cpuinit_ops {
 /**
  * struct x86_platform_ops - platform specific runtime functions
  * @calibrate_tsc:		calibrate TSC
- * @wallclock_init:		init the wallclock device
  * @get_wallclock:		get time from HW clock like RTC etc.
  * @set_wallclock:		set time back to HW clock
  * @is_untracked_pat_range	exclude from PAT logic
@@ -168,7 +167,6 @@ struct x86_cpuinit_ops {
  */
 struct x86_platform_ops {
 	unsigned long (*calibrate_tsc)(void);
-	void (*wallclock_init)(void);
 	unsigned long (*get_wallclock)(void);
 	int (*set_wallclock)(unsigned long nowtime);
 	void (*iommu_shutdown)(void);
...
@@ -2123,6 +2123,25 @@ void default_init_apic_ldr(void)
 	apic_write(APIC_LDR, val);
 }

+int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+				   const struct cpumask *andmask,
+				   unsigned int *apicid)
+{
+	unsigned int cpu;
+
+	for_each_cpu_and(cpu, cpumask, andmask) {
+		if (cpumask_test_cpu(cpu, cpu_online_mask))
+			break;
+	}
+
+	if (likely(cpu < nr_cpu_ids)) {
+		*apicid = per_cpu(x86_cpu_to_apicid, cpu);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
 /*
  * Power management
  */
...
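The out-of-line default helper above picks the first online CPU in the intersection of the two masks. The same selection, modeled over plain bitmasks in user-space C (hypothetical helper, standing in for for_each_cpu_and() plus the online check):

    #include <errno.h>
    #include <stdio.h>

    /* Scan the AND of both masks, restricted to "online" bits, and
     * report the lowest set position -- the shape of
     * default_cpu_mask_to_apicid_and() above. */
    static int first_cpu_and(unsigned long mask1, unsigned long mask2,
                             unsigned long online, unsigned int *cpu)
    {
            unsigned long hits = mask1 & mask2 & online;
            unsigned int i;

            for (i = 0; i < 8 * sizeof(hits); i++) {
                    if (hits & (1UL << i)) {
                            *cpu = i;
                            return 0;
                    }
            }
            return -EINVAL; /* nothing online in the intersection */
    }

    int main(void)
    {
            unsigned int cpu;

            if (!first_cpu_and(0x0f, 0x0c, 0x05, &cpu)) /* online: 0, 2 */
                    printf("first matching cpu: %u\n", cpu); /* prints 2 */
            else
                    puts("no matching online cpu");
            return 0;
    }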
@@ -36,25 +36,6 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 1;
 }

-static const struct cpumask *flat_target_cpus(void)
-{
-	return cpu_online_mask;
-}
-
-static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	/* Careful. Some cpus do not strictly honor the set of cpus
-	 * specified in the interrupt destination when using lowest
-	 * priority interrupt delivery mode.
-	 *
-	 * In particular there was a hyperthreading cpu observed to
-	 * deliver interrupts to the wrong hyperthread when only one
-	 * hyperthread was specified in the interrupt destination.
-	 */
-	cpumask_clear(retmask);
-	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
 /*
  * Set up the logical destination ID.
  *
@@ -92,7 +73,7 @@ static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
 }

 static void
- flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
+flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
 {
 	unsigned long mask = cpumask_bits(cpumask)[0];
 	int cpu = smp_processor_id();
@@ -186,7 +167,7 @@ static struct apic apic_flat = {
 	.irq_delivery_mode		= dest_LowestPrio,
 	.irq_dest_mode			= 1, /* logical */

-	.target_cpus			= flat_target_cpus,
+	.target_cpus			= online_target_cpus,
 	.disable_esr			= 0,
 	.dest_logical			= APIC_DEST_LOGICAL,
 	.check_apicid_used		= NULL,
@@ -210,8 +191,7 @@ static struct apic apic_flat = {
 	.set_apic_id			= set_apic_id,
 	.apic_id_mask			= 0xFFu << 24,

-	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,

 	.send_IPI_mask			= flat_send_IPI_mask,
 	.send_IPI_mask_allbutself	= flat_send_IPI_mask_allbutself,
@@ -262,17 +242,6 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 0;
 }

-static const struct cpumask *physflat_target_cpus(void)
-{
-	return cpu_online_mask;
-}
-
-static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	cpumask_clear(retmask);
-	cpumask_set_cpu(cpu, retmask);
-}
-
 static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
 {
 	default_send_IPI_mask_sequence_phys(cpumask, vector);
@@ -294,38 +263,6 @@ static void physflat_send_IPI_all(int vector)
 	physflat_send_IPI_mask(cpu_online_mask, vector);
 }

-static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	cpu = cpumask_first(cpumask);
-	if ((unsigned)cpu < nr_cpu_ids)
-		return per_cpu(x86_cpu_to_apicid, cpu);
-	else
-		return BAD_APICID;
-}
-
-static unsigned int
-physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-				const struct cpumask *andmask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	for_each_cpu_and(cpu, cpumask, andmask) {
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			break;
-	}
-	return per_cpu(x86_cpu_to_apicid, cpu);
-}
-
 static int physflat_probe(void)
 {
 	if (apic == &apic_physflat || num_possible_cpus() > 8)
@@ -345,13 +282,13 @@ static struct apic apic_physflat = {
 	.irq_delivery_mode		= dest_Fixed,
 	.irq_dest_mode			= 0, /* physical */

-	.target_cpus			= physflat_target_cpus,
+	.target_cpus			= online_target_cpus,
 	.disable_esr			= 0,
 	.dest_logical			= 0,
 	.check_apicid_used		= NULL,
 	.check_apicid_present		= NULL,

-	.vector_allocation_domain	= physflat_vector_allocation_domain,
+	.vector_allocation_domain	= default_vector_allocation_domain,
 	/* not needed, but shouldn't hurt: */
 	.init_apic_ldr			= flat_init_apic_ldr,
@@ -370,8 +307,7 @@ static struct apic apic_physflat = {
 	.set_apic_id			= set_apic_id,
 	.apic_id_mask			= 0xFFu << 24,

-	.cpu_mask_to_apicid		= physflat_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and		= physflat_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,

 	.send_IPI_mask			= physflat_send_IPI_mask,
 	.send_IPI_mask_allbutself	= physflat_send_IPI_mask_allbutself,
...
@@ -100,12 +100,12 @@ static unsigned long noop_check_apicid_present(int bit)
 	return physid_isset(bit, phys_cpu_present_map);
 }

-static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
+static bool noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	if (cpu != 0)
 		pr_warning("APIC: Vector allocated for non-BSP cpu\n");
-	cpumask_clear(retmask);
-	cpumask_set_cpu(cpu, retmask);
+	cpumask_copy(retmask, cpumask_of(cpu));
+	return true;
 }

 static u32 noop_apic_read(u32 reg)
@@ -159,8 +159,7 @@ struct apic apic_noop = {
 	.set_apic_id			= NULL,
 	.apic_id_mask			= 0x0F << 24,

-	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,

 	.send_IPI_mask			= noop_send_IPI_mask,
 	.send_IPI_mask_allbutself	= noop_send_IPI_mask_allbutself,
...
@@ -72,17 +72,6 @@ static int numachip_phys_pkg_id(int initial_apic_id, int index_msb)
 	return initial_apic_id >> index_msb;
 }

-static const struct cpumask *numachip_target_cpus(void)
-{
-	return cpu_online_mask;
-}
-
-static void numachip_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	cpumask_clear(retmask);
-	cpumask_set_cpu(cpu, retmask);
-}
-
 static int __cpuinit numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 {
 	union numachip_csr_g3_ext_irq_gen int_gen;
@@ -157,38 +146,6 @@ static void numachip_send_IPI_self(int vector)
 	__default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
 }

-static unsigned int numachip_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	cpu = cpumask_first(cpumask);
-	if (likely((unsigned)cpu < nr_cpu_ids))
-		return per_cpu(x86_cpu_to_apicid, cpu);
-
-	return BAD_APICID;
-}
-
-static unsigned int
-numachip_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-				const struct cpumask *andmask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	for_each_cpu_and(cpu, cpumask, andmask) {
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			break;
-	}
-	return per_cpu(x86_cpu_to_apicid, cpu);
-}
-
 static int __init numachip_probe(void)
 {
 	return apic == &apic_numachip;
@@ -253,13 +210,13 @@ static struct apic apic_numachip __refconst = {
 	.irq_delivery_mode		= dest_Fixed,
 	.irq_dest_mode			= 0, /* physical */

-	.target_cpus			= numachip_target_cpus,
+	.target_cpus			= online_target_cpus,
 	.disable_esr			= 0,
 	.dest_logical			= 0,
 	.check_apicid_used		= NULL,
 	.check_apicid_present		= NULL,

-	.vector_allocation_domain	= numachip_vector_allocation_domain,
+	.vector_allocation_domain	= default_vector_allocation_domain,
 	.init_apic_ldr			= flat_init_apic_ldr,

 	.ioapic_phys_id_map		= NULL,
@@ -277,8 +234,7 @@ static struct apic apic_numachip __refconst = {
 	.set_apic_id			= set_apic_id,
 	.apic_id_mask			= 0xffU << 24,

-	.cpu_mask_to_apicid		= numachip_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and		= numachip_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,

 	.send_IPI_mask			= numachip_send_IPI_mask,
 	.send_IPI_mask_allbutself	= numachip_send_IPI_mask_allbutself,
...
@@ -26,15 +26,6 @@ static int bigsmp_apic_id_registered(void)
 	return 1;
 }

-static const struct cpumask *bigsmp_target_cpus(void)
-{
-#ifdef CONFIG_SMP
-	return cpu_online_mask;
-#else
-	return cpumask_of(0);
-#endif
-}
-
 static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
 {
 	return 0;
@@ -105,32 +96,6 @@ static int bigsmp_check_phys_apicid_present(int phys_apicid)
 	return 1;
 }

-/* As we are using single CPU as destination, pick only one CPU here */
-static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	int cpu = cpumask_first(cpumask);
-
-	if (cpu < nr_cpu_ids)
-		return cpu_physical_id(cpu);
-	return BAD_APICID;
-}
-
-static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			      const struct cpumask *andmask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	for_each_cpu_and(cpu, cpumask, andmask) {
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			return cpu_physical_id(cpu);
-	}
-	return BAD_APICID;
-}
-
 static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
 {
 	return cpuid_apic >> index_msb;
@@ -177,12 +142,6 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
 	{ } /* NULL entry stops DMI scanning */
 };

-static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	cpumask_clear(retmask);
-	cpumask_set_cpu(cpu, retmask);
-}
-
 static int probe_bigsmp(void)
 {
 	if (def_to_bigsmp)
@@ -205,13 +164,13 @@ static struct apic apic_bigsmp = {
 	/* phys delivery to target CPU: */
 	.irq_dest_mode			= 0,

-	.target_cpus			= bigsmp_target_cpus,
+	.target_cpus			= default_target_cpus,
 	.disable_esr			= 1,
 	.dest_logical			= 0,
 	.check_apicid_used		= bigsmp_check_apicid_used,
 	.check_apicid_present		= bigsmp_check_apicid_present,

-	.vector_allocation_domain	= bigsmp_vector_allocation_domain,
+	.vector_allocation_domain	= default_vector_allocation_domain,
 	.init_apic_ldr			= bigsmp_init_apic_ldr,

 	.ioapic_phys_id_map		= bigsmp_ioapic_phys_id_map,
@@ -229,8 +188,7 @@ static struct apic apic_bigsmp = {
 	.set_apic_id			= NULL,
 	.apic_id_mask			= 0xFF << 24,

-	.cpu_mask_to_apicid		= bigsmp_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and		= bigsmp_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,

 	.send_IPI_mask			= bigsmp_send_IPI_mask,
 	.send_IPI_mask_allbutself	= NULL,
...
@@ -394,21 +394,6 @@ static void es7000_enable_apic_mode(void)
 		WARN(1, "Command failed, status = %x\n", mip_status);
 }

-static void es7000_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	/* Careful. Some cpus do not strictly honor the set of cpus
-	 * specified in the interrupt destination when using lowest
-	 * priority interrupt delivery mode.
-	 *
-	 * In particular there was a hyperthreading cpu observed to
-	 * deliver interrupts to the wrong hyperthread when only one
-	 * hyperthread was specified in the interrupt destination.
-	 */
-	cpumask_clear(retmask);
-	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
 static void es7000_wait_for_init_deassert(atomic_t *deassert)
 {
 	while (!atomic_read(deassert))
@@ -540,45 +525,49 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
 	return 1;
 }

-static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static inline int
+es7000_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id)
 {
 	unsigned int round = 0;
-	int cpu, uninitialized_var(apicid);
+	unsigned int cpu, uninitialized_var(apicid);

 	/*
 	 * The cpus in the mask must all be on the apic cluster.
 	 */
-	for_each_cpu(cpu, cpumask) {
+	for_each_cpu_and(cpu, cpumask, cpu_online_mask) {
 		int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);

 		if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
 			WARN(1, "Not a valid mask!");
-			return BAD_APICID;
+			return -EINVAL;
 		}
-		apicid = new_apicid;
+		apicid |= new_apicid;
 		round++;
 	}
-	return apicid;
+	if (!round)
+		return -EINVAL;
+	*dest_id = apicid;
+	return 0;
 }

-static unsigned int
+static int
 es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
-			      const struct cpumask *andmask)
+			      const struct cpumask *andmask,
+			      unsigned int *apicid)
 {
-	int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
 	cpumask_var_t cpumask;
+	*apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);

 	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-		return apicid;
+		return 0;

 	cpumask_and(cpumask, inmask, andmask);
-	cpumask_and(cpumask, cpumask, cpu_online_mask);
-	apicid = es7000_cpu_mask_to_apicid(cpumask);
+	es7000_cpu_mask_to_apicid(cpumask, apicid);

 	free_cpumask_var(cpumask);

-	return apicid;
+	return 0;
 }

 static int es7000_phys_pkg_id(int cpuid_apic, int index_msb)
@@ -638,7 +627,7 @@ static struct apic __refdata apic_es7000_cluster = {
 	.check_apicid_used		= es7000_check_apicid_used,
 	.check_apicid_present		= es7000_check_apicid_present,

-	.vector_allocation_domain	= es7000_vector_allocation_domain,
+	.vector_allocation_domain	= flat_vector_allocation_domain,
 	.init_apic_ldr			= es7000_init_apic_ldr_cluster,

 	.ioapic_phys_id_map		= es7000_ioapic_phys_id_map,
@@ -656,7 +645,6 @@ static struct apic __refdata apic_es7000_cluster = {
 	.set_apic_id			= NULL,
 	.apic_id_mask			= 0xFF << 24,

-	.cpu_mask_to_apicid		= es7000_cpu_mask_to_apicid,
 	.cpu_mask_to_apicid_and		= es7000_cpu_mask_to_apicid_and,

 	.send_IPI_mask			= es7000_send_IPI_mask,
@@ -705,7 +693,7 @@ static struct apic __refdata apic_es7000 = {
 	.check_apicid_used		= es7000_check_apicid_used,
 	.check_apicid_present		= es7000_check_apicid_present,

-	.vector_allocation_domain	= es7000_vector_allocation_domain,
+	.vector_allocation_domain	= flat_vector_allocation_domain,
 	.init_apic_ldr			= es7000_init_apic_ldr,

 	.ioapic_phys_id_map		= es7000_ioapic_phys_id_map,
@@ -723,7 +711,6 @@ static struct apic __refdata apic_es7000 = {
 	.set_apic_id			= NULL,
 	.apic_id_mask			= 0xFF << 24,

-	.cpu_mask_to_apicid		= es7000_cpu_mask_to_apicid,
 	.cpu_mask_to_apicid_and		= es7000_cpu_mask_to_apicid_and,

 	.send_IPI_mask			= es7000_send_IPI_mask,
...
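The es7000 (and, below, summit) conversion also changes behavior: the helper now ORs the logical IDs of all requested CPUs into one destination and rejects masks that span more than one APIC cluster, instead of keeping only the last ID. A user-space sketch of that check, assuming (as in xAPIC logical clustered mode) that APIC_CLUSTER() extracts the high nibble of the 8-bit logical ID:

    #include <errno.h>
    #include <stdio.h>

    #define APIC_CLUSTER(id)    ((id) & 0xF0)   /* assumed high-nibble cluster */

    /* Combine logical IDs into one destination, failing if they do not
     * all share a cluster -- the shape of the rewritten
     * es7000/summit cpu_mask_to_apicid(). */
    static int ids_to_dest(const unsigned int *ids, int n, unsigned int *dest)
    {
            unsigned int round = 0, apicid = 0;
            int i;

            for (i = 0; i < n; i++) {
                    if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(ids[i]))
                            return -EINVAL; /* mask crosses clusters */
                    apicid |= ids[i];
                    round++;
            }
            if (!round)
                    return -EINVAL; /* empty mask */
            *dest = apicid;
            return 0;
    }

    int main(void)
    {
            unsigned int ids[] = { 0x11, 0x12 };    /* both in cluster 0x10 */
            unsigned int dest;

            if (!ids_to_dest(ids, 2, &dest))
                    printf("combined dest: %#x\n", dest);   /* 0x13 */
            return 0;
    }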
(One file's diff is collapsed in this view and not reproduced here.)
@@ -406,16 +406,13 @@ static inline int numaq_check_phys_apicid_present(int phys_apicid)
  * We use physical apicids here, not logical, so just return the default
  * physical broadcast to stop people from breaking us
  */
-static unsigned int numaq_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	return 0x0F;
-}
-
-static inline unsigned int
+static int
 numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			     const struct cpumask *andmask)
+			     const struct cpumask *andmask,
+			     unsigned int *apicid)
 {
-	return 0x0F;
+	*apicid = 0x0F;
+	return 0;
 }

 /* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
@@ -441,20 +438,6 @@ static int probe_numaq(void)
 	return found_numaq;
 }

-static void numaq_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	/* Careful. Some cpus do not strictly honor the set of cpus
-	 * specified in the interrupt destination when using lowest
-	 * priority interrupt delivery mode.
-	 *
-	 * In particular there was a hyperthreading cpu observed to
-	 * deliver interrupts to the wrong hyperthread when only one
-	 * hyperthread was specified in the interrupt destination.
-	 */
-	cpumask_clear(retmask);
-	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
 static void numaq_setup_portio_remap(void)
 {
 	int num_quads = num_online_nodes();
@@ -491,7 +474,7 @@ static struct apic __refdata apic_numaq = {
 	.check_apicid_used		= numaq_check_apicid_used,
 	.check_apicid_present		= numaq_check_apicid_present,

-	.vector_allocation_domain	= numaq_vector_allocation_domain,
+	.vector_allocation_domain	= flat_vector_allocation_domain,
 	.init_apic_ldr			= numaq_init_apic_ldr,

 	.ioapic_phys_id_map		= numaq_ioapic_phys_id_map,
@@ -509,7 +492,6 @@ static struct apic __refdata apic_numaq = {
 	.set_apic_id			= NULL,
 	.apic_id_mask			= 0x0F << 24,

-	.cpu_mask_to_apicid		= numaq_cpu_mask_to_apicid,
 	.cpu_mask_to_apicid_and		= numaq_cpu_mask_to_apicid_and,

 	.send_IPI_mask			= numaq_send_IPI_mask,
...
@@ -66,21 +66,6 @@ static void setup_apic_flat_routing(void)
 #endif
 }

-static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	/*
-	 * Careful. Some cpus do not strictly honor the set of cpus
-	 * specified in the interrupt destination when using lowest
-	 * priority interrupt delivery mode.
-	 *
-	 * In particular there was a hyperthreading cpu observed to
-	 * deliver interrupts to the wrong hyperthread when only one
-	 * hyperthread was specified in the interrupt destination.
-	 */
-	cpumask_clear(retmask);
-	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
 /* should be called last. */
 static int probe_default(void)
 {
@@ -105,7 +90,7 @@ static struct apic apic_default = {
 	.check_apicid_used		= default_check_apicid_used,
 	.check_apicid_present		= default_check_apicid_present,

-	.vector_allocation_domain	= default_vector_allocation_domain,
+	.vector_allocation_domain	= flat_vector_allocation_domain,
 	.init_apic_ldr			= default_init_apic_ldr,

 	.ioapic_phys_id_map		= default_ioapic_phys_id_map,
@@ -123,8 +108,7 @@ static struct apic apic_default = {
 	.set_apic_id			= NULL,
 	.apic_id_mask			= 0x0F << 24,

-	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,

 	.send_IPI_mask			= default_send_IPI_mask_logical,
 	.send_IPI_mask_allbutself	= default_send_IPI_mask_allbutself_logical,
...
@@ -263,43 +263,48 @@ static int summit_check_phys_apicid_present(int physical_apicid)
 	return 1;
 }

-static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static inline int
+summit_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id)
 {
 	unsigned int round = 0;
-	int cpu, apicid = 0;
+	unsigned int cpu, apicid = 0;

 	/*
 	 * The cpus in the mask must all be on the apic cluster.
 	 */
-	for_each_cpu(cpu, cpumask) {
+	for_each_cpu_and(cpu, cpumask, cpu_online_mask) {
 		int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);

 		if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
 			printk("%s: Not a valid mask!\n", __func__);
-			return BAD_APICID;
+			return -EINVAL;
 		}
 		apicid |= new_apicid;
 		round++;
 	}
-	return apicid;
+	if (!round)
+		return -EINVAL;
+	*dest_id = apicid;
+	return 0;
 }

-static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
-			      const struct cpumask *andmask)
+static int
+summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
+			      const struct cpumask *andmask,
+			      unsigned int *apicid)
 {
-	int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
 	cpumask_var_t cpumask;
+	*apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);

 	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-		return apicid;
+		return 0;

 	cpumask_and(cpumask, inmask, andmask);
-	cpumask_and(cpumask, cpumask, cpu_online_mask);
-	apicid = summit_cpu_mask_to_apicid(cpumask);
+	summit_cpu_mask_to_apicid(cpumask, apicid);

 	free_cpumask_var(cpumask);

-	return apicid;
+	return 0;
 }

 /*
@@ -320,20 +325,6 @@ static int probe_summit(void)
 	return 0;
 }

-static void summit_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	/* Careful. Some cpus do not strictly honor the set of cpus
-	 * specified in the interrupt destination when using lowest
-	 * priority interrupt delivery mode.
-	 *
-	 * In particular there was a hyperthreading cpu observed to
-	 * deliver interrupts to the wrong hyperthread when only one
-	 * hyperthread was specified in the interrupt destination.
-	 */
-	cpumask_clear(retmask);
-	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
 #ifdef CONFIG_X86_SUMMIT_NUMA
 static struct rio_table_hdr *rio_table_hdr;
 static struct scal_detail *scal_devs[MAX_NUMNODES];
@@ -509,7 +500,7 @@ static struct apic apic_summit = {
 	.check_apicid_used		= summit_check_apicid_used,
 	.check_apicid_present		= summit_check_apicid_present,

-	.vector_allocation_domain	= summit_vector_allocation_domain,
+	.vector_allocation_domain	= flat_vector_allocation_domain,
 	.init_apic_ldr			= summit_init_apic_ldr,

 	.ioapic_phys_id_map		= summit_ioapic_phys_id_map,
@@ -527,7 +518,6 @@ static struct apic apic_summit = {
 	.set_apic_id			= NULL,
 	.apic_id_mask			= 0xFF << 24,

-	.cpu_mask_to_apicid		= summit_cpu_mask_to_apicid,
 	.cpu_mask_to_apicid_and		= summit_cpu_mask_to_apicid_and,

 	.send_IPI_mask			= summit_send_IPI_mask,
...
@@ -81,7 +81,7 @@ static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
 }

 static void
- x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
+x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
 {
 	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
 }
@@ -96,36 +96,37 @@ static void x2apic_send_IPI_all(int vector)
 	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
 }

-static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static int
+x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+			      const struct cpumask *andmask,
+			      unsigned int *apicid)
 {
-	/*
-	 * We're using fixed IRQ delivery, can only return one logical APIC ID.
-	 * May as well be the first.
-	 */
-	int cpu = cpumask_first(cpumask);
+	u32 dest = 0;
+	u16 cluster;
+	int i;

-	if ((unsigned)cpu < nr_cpu_ids)
-		return per_cpu(x86_cpu_to_logical_apicid, cpu);
-	else
-		return BAD_APICID;
-}
+	for_each_cpu_and(i, cpumask, andmask) {
+		if (!cpumask_test_cpu(i, cpu_online_mask))
+			continue;
+		dest = per_cpu(x86_cpu_to_logical_apicid, i);
+		cluster = x2apic_cluster(i);
+		break;
+	}

-static unsigned int
-x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			      const struct cpumask *andmask)
-{
-	int cpu;
+	if (!dest)
+		return -EINVAL;

-	/*
-	 * We're using fixed IRQ delivery, can only return one logical APIC ID.
-	 * May as well be the first.
-	 */
-	for_each_cpu_and(cpu, cpumask, andmask) {
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			break;
+	for_each_cpu_and(i, cpumask, andmask) {
+		if (!cpumask_test_cpu(i, cpu_online_mask))
+			continue;
+		if (cluster != x2apic_cluster(i))
+			continue;
+		dest |= per_cpu(x86_cpu_to_logical_apicid, i);
 	}

-	return per_cpu(x86_cpu_to_logical_apicid, cpu);
+	*apicid = dest;
+
+	return 0;
 }

 static void init_x2apic_ldr(void)
@@ -208,6 +209,16 @@ static int x2apic_cluster_probe(void)
 		return 0;
 }

+/*
+ * Each x2apic cluster is an allocation domain.
+ */
+static bool cluster_vector_allocation_domain(int cpu, struct cpumask *retmask)
+{
+	cpumask_clear(retmask);
+	cpumask_copy(retmask, per_cpu(cpus_in_cluster, cpu));
+	return true;
+}
+
 static struct apic apic_x2apic_cluster = {

 	.name				= "cluster x2apic",
@@ -219,13 +230,13 @@ static struct apic apic_x2apic_cluster = {
 	.irq_delivery_mode		= dest_LowestPrio,
 	.irq_dest_mode			= 1, /* logical */

-	.target_cpus			= x2apic_target_cpus,
+	.target_cpus			= online_target_cpus,
 	.disable_esr			= 0,
 	.dest_logical			= APIC_DEST_LOGICAL,
 	.check_apicid_used		= NULL,
 	.check_apicid_present		= NULL,

-	.vector_allocation_domain	= x2apic_vector_allocation_domain,
+	.vector_allocation_domain	= cluster_vector_allocation_domain,
 	.init_apic_ldr			= init_x2apic_ldr,

 	.ioapic_phys_id_map		= NULL,
@@ -243,7 +254,6 @@ static struct apic apic_x2apic_cluster = {
 	.set_apic_id			= x2apic_set_apic_id,
 	.apic_id_mask			= 0xFFFFFFFFu,

-	.cpu_mask_to_apicid		= x2apic_cpu_mask_to_apicid,
 	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,

 	.send_IPI_mask			= x2apic_send_IPI_mask,
...
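The cluster x2apic rewrite is the most substantial one: instead of always routing to a single CPU, the helper makes two passes over the requested set. The first finds an online CPU and fixes the target cluster; the second ORs in the logical IDs of every other online CPU in that same cluster, so lowest-priority delivery can pick among all of them. A user-space model of the two passes (cluster taken to be the upper 16 bits of the x2apic logical ID; per-cpu data modeled as arrays):

    #include <errno.h>
    #include <stdio.h>

    #define X2APIC_CLUSTER(ldr)     ((ldr) >> 16)   /* upper 16 bits */

    static int combine_dest(const unsigned int *ldr, const int *online,
                            int n, unsigned int *apicid)
    {
            unsigned int dest = 0, cluster = 0;
            int i;

            /* pass 1: pick the first usable CPU and note its cluster */
            for (i = 0; i < n; i++) {
                    if (!online[i])
                            continue;
                    dest = ldr[i];
                    cluster = X2APIC_CLUSTER(ldr[i]);
                    break;
            }
            if (!dest)
                    return -EINVAL;

            /* pass 2: fold in every other online CPU of that cluster */
            for (i = 0; i < n; i++) {
                    if (!online[i] || X2APIC_CLUSTER(ldr[i]) != cluster)
                            continue;
                    dest |= ldr[i];
            }
            *apicid = dest;
            return 0;
    }

    int main(void)
    {
            unsigned int ldr[] = { 0x10001, 0x10002, 0x20001 };
            int online[] = { 1, 1, 1 };
            unsigned int dest;

            if (!combine_dest(ldr, online, 3, &dest))
                    printf("cluster dest: %#x\n", dest);    /* 0x10003 */
            return 0;
    }

The matching cluster_vector_allocation_domain() hands the whole cluster to the vector allocator, so the vector is set up on every CPU the destination might name.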
@@ -76,38 +76,6 @@ static void x2apic_send_IPI_all(int vector)
 	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
 }

-static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	int cpu = cpumask_first(cpumask);
-
-	if ((unsigned)cpu < nr_cpu_ids)
-		return per_cpu(x86_cpu_to_apicid, cpu);
-	else
-		return BAD_APICID;
-}
-
-static unsigned int
-x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			      const struct cpumask *andmask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	for_each_cpu_and(cpu, cpumask, andmask) {
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			break;
-	}
-
-	return per_cpu(x86_cpu_to_apicid, cpu);
-}
-
 static void init_x2apic_ldr(void)
 {
 }
@@ -131,13 +99,13 @@ static struct apic apic_x2apic_phys = {
 	.irq_delivery_mode		= dest_Fixed,
 	.irq_dest_mode			= 0, /* physical */

-	.target_cpus			= x2apic_target_cpus,
+	.target_cpus			= online_target_cpus,
 	.disable_esr			= 0,
 	.dest_logical			= 0,
 	.check_apicid_used		= NULL,
 	.check_apicid_present		= NULL,

-	.vector_allocation_domain	= x2apic_vector_allocation_domain,
+	.vector_allocation_domain	= default_vector_allocation_domain,
 	.init_apic_ldr			= init_x2apic_ldr,

 	.ioapic_phys_id_map		= NULL,
@@ -155,8 +123,7 @@ static struct apic apic_x2apic_phys = {
 	.set_apic_id			= x2apic_set_apic_id,
 	.apic_id_mask			= 0xFFFFFFFFu,

-	.cpu_mask_to_apicid		= x2apic_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,

 	.send_IPI_mask			= x2apic_send_IPI_mask,
 	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
...
@@ -185,17 +185,6 @@ EXPORT_SYMBOL_GPL(uv_possible_blades);
 unsigned long sn_rtc_cycles_per_second;
 EXPORT_SYMBOL(sn_rtc_cycles_per_second);

-static const struct cpumask *uv_target_cpus(void)
-{
-	return cpu_online_mask;
-}
-
-static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	cpumask_clear(retmask);
-	cpumask_set_cpu(cpu, retmask);
-}
-
 static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 {
 #ifdef CONFIG_SMP
@@ -280,25 +269,12 @@ static void uv_init_apic_ldr(void)
 {
 }

-static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	int cpu = cpumask_first(cpumask);
-
-	if ((unsigned)cpu < nr_cpu_ids)
-		return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
-	else
-		return BAD_APICID;
-}
-
-static unsigned int
+static int
 uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			  const struct cpumask *andmask)
+			  const struct cpumask *andmask,
+			  unsigned int *apicid)
 {
-	int cpu;
+	int unsigned cpu;

 	/*
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
@@ -308,7 +284,13 @@ uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 		if (cpumask_test_cpu(cpu, cpu_online_mask))
 			break;
 	}
-	return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
+
+	if (likely(cpu < nr_cpu_ids)) {
+		*apicid = per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
+		return 0;
+	}
+
+	return -EINVAL;
 }

 static unsigned int x2apic_get_apic_id(unsigned long x)
@@ -362,13 +344,13 @@ static struct apic __refdata apic_x2apic_uv_x = {
 	.irq_delivery_mode		= dest_Fixed,
 	.irq_dest_mode			= 0, /* physical */

-	.target_cpus			= uv_target_cpus,
+	.target_cpus			= online_target_cpus,
 	.disable_esr			= 0,
 	.dest_logical			= APIC_DEST_LOGICAL,
 	.check_apicid_used		= NULL,
 	.check_apicid_present		= NULL,

-	.vector_allocation_domain	= uv_vector_allocation_domain,
+	.vector_allocation_domain	= default_vector_allocation_domain,
 	.init_apic_ldr			= uv_init_apic_ldr,

 	.ioapic_phys_id_map		= NULL,
@@ -386,7 +368,6 @@ static struct apic __refdata apic_x2apic_uv_x = {
 	.set_apic_id			= set_apic_id,
 	.apic_id_mask			= 0xFFFFFFFFu,

-	.cpu_mask_to_apicid		= uv_cpu_mask_to_apicid,
 	.cpu_mask_to_apicid_and		= uv_cpu_mask_to_apicid_and,

 	.send_IPI_mask			= uv_send_IPI_mask,
...
@@ -119,7 +119,7 @@ static __init void early_serial_init(char *s)
 	unsigned char c;
 	unsigned divisor;
 	unsigned baud = DEFAULT_BAUD;
-	char *e;
+	ssize_t ret;

 	if (*s == ',')
 		++s;
@@ -127,14 +127,14 @@ static __init void early_serial_init(char *s)
 	if (*s) {
 		unsigned port;
 		if (!strncmp(s, "0x", 2)) {
-			early_serial_base = simple_strtoul(s, &e, 16);
+			ret = kstrtoint(s, 16, &early_serial_base);
 		} else {
 			static const int __initconst bases[] = { 0x3f8, 0x2f8 };

 			if (!strncmp(s, "ttyS", 4))
 				s += 4;
-			port = simple_strtoul(s, &e, 10);
-			if (port > 1 || s == e)
+			ret = kstrtouint(s, 10, &port);
+			if (ret || port > 1)
 				port = 0;
 			early_serial_base = bases[port];
 		}
@@ -149,8 +149,8 @@ static __init void early_serial_init(char *s)
 	outb(0x3, early_serial_base + MCR);	/* DTR + RTS */

 	if (*s) {
-		baud = simple_strtoul(s, &e, 0);
-		if (baud == 0 || s == e)
+		ret = kstrtouint(s, 0, &baud);
+		if (ret || baud == 0)
 			baud = DEFAULT_BAUD;
 	}
...
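The early-serial change swaps simple_strtoul(), which silently stops at the first non-digit and forces callers to inspect an end pointer, for the kstrto*() helpers, which reject trailing junk and overflow and return a negative errno; the new `if (ret || ...)` fallbacks key on exactly that. A user-space approximation of kstrtouint()'s strictness (hypothetical helper built on strtoul; the kernel version also distinguishes -ERANGE from -EINVAL):

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Strict string-to-uint: the whole string must parse and fit. */
    static int my_kstrtouint(const char *s, int base, unsigned int *res)
    {
            char *end;
            unsigned long val;

            errno = 0;
            val = strtoul(s, &end, base);
            if (errno || end == s || *end != '\0' || val > UINT_MAX)
                    return -EINVAL;
            *res = (unsigned int)val;
            return 0;
    }

    int main(void)
    {
            unsigned int baud;

            if (my_kstrtouint("115200", 0, &baud))
                    baud = 9600;    /* fall back, like DEFAULT_BAUD */
            printf("baud = %u\n", baud);
            return 0;
    }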
@@ -1031,8 +1031,6 @@ void __init setup_arch(char **cmdline_p)
 	x86_init.timers.wallclock_init();

-	x86_platform.wallclock_init();
-
 	mcheck_init();

 	arch_init_ideal_nops();
...
@@ -29,7 +29,6 @@ void __init x86_init_uint_noop(unsigned int unused) { }
 void __init x86_init_pgd_noop(pgd_t *unused) { }
 int __init iommu_init_noop(void) { return 0; }
 void iommu_shutdown_noop(void) { }
-void wallclock_init_noop(void) { }

 /*
  * The platform setup functions are preset with the default functions
@@ -101,7 +100,6 @@ static int default_i8042_detect(void) { return 1; };
 struct x86_platform_ops x86_platform = {
 	.calibrate_tsc			= native_calibrate_tsc,
-	.wallclock_init			= wallclock_init_noop,
 	.get_wallclock			= mach_get_cmos_time,
 	.set_wallclock			= mach_set_rtc_mmss,
 	.iommu_shutdown			= iommu_shutdown_noop,
...
@@ -135,6 +135,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 	unsigned long mmr_value;
 	struct uv_IO_APIC_route_entry *entry;
 	int mmr_pnode, err;
+	unsigned int dest;

 	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
 			sizeof(unsigned long));
@@ -143,6 +144,10 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 	if (err != 0)
 		return err;

+	err = apic->cpu_mask_to_apicid_and(eligible_cpu, eligible_cpu, &dest);
+	if (err != 0)
+		return err;
+
 	if (limit == UV_AFFINITY_CPU)
 		irq_set_status_flags(irq, IRQ_NO_BALANCING);
 	else
@@ -159,7 +164,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 	entry->polarity		= 0;
 	entry->trigger		= 0;
 	entry->mask		= 0;
-	entry->dest		= apic->cpu_mask_to_apicid(eligible_cpu);
+	entry->dest		= dest;

 	mmr_pnode = uv_blade_to_pnode(mmr_blade);
 	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@@ -222,7 +227,7 @@ uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
 	if (cfg->move_in_progress)
 		send_cleanup_vector(cfg);

-	return 0;
+	return IRQ_SET_MASK_OK_NOCOPY;
 }

 /*
...
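uv_set_irq_affinity() now returns IRQ_SET_MASK_OK_NOCOPY instead of 0 (IRQ_SET_MASK_OK). The distinction tells the generic irq core whether it still has to copy the new mask into irq_data->affinity, or whether the callback already stored a (possibly adjusted) mask itself. A toy model of that contract (enum values as in include/linux/irq.h of this era; the core's dispatch is simplified):

    #include <stdio.h>

    enum irq_set_mask_result {
            IRQ_SET_MASK_OK = 0,            /* core copies mask to ->affinity */
            IRQ_SET_MASK_OK_NOCOPY = 1,     /* callback stored it already */
    };

    static void core_set_affinity(enum irq_set_mask_result ret,
                                  unsigned long mask, unsigned long *affinity)
    {
            if (ret == IRQ_SET_MASK_OK)
                    *affinity = mask;       /* generic copy */
            /* IRQ_SET_MASK_OK_NOCOPY: nothing left to do here */
    }

    int main(void)
    {
            unsigned long affinity = 0;

            core_set_affinity(IRQ_SET_MASK_OK, 0x3, &affinity);
            printf("affinity after OK: %#lx\n", affinity);
            return 0;
    }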
@@ -902,7 +902,6 @@ static int intel_setup_ioapic_entry(int irq,
 	return 0;
 }

-#ifdef CONFIG_SMP
 /*
  * Migrate the IO-APIC irq in the presence of intr-remapping.
  *
@@ -924,6 +923,10 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	struct irq_cfg *cfg = data->chip_data;
 	unsigned int dest, irq = data->irq;
 	struct irte irte;
+	int err;
+
+	if (!config_enabled(CONFIG_SMP))
+		return -EINVAL;

 	if (!cpumask_intersects(mask, cpu_online_mask))
 		return -EINVAL;
@@ -931,10 +934,16 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	if (get_irte(irq, &irte))
 		return -EBUSY;

-	if (assign_irq_vector(irq, cfg, mask))
-		return -EBUSY;
+	err = assign_irq_vector(irq, cfg, mask);
+	if (err)
+		return err;

-	dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
+	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
+	if (err) {
+		if (assign_irq_vector(irq, cfg, data->affinity))
+			pr_err("Failed to recover vector for irq %d\n", irq);
+		return err;
+	}

 	irte.vector = cfg->vector;
 	irte.dest_id = IRTE_DEST(dest);
@@ -956,7 +965,6 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	cpumask_copy(data->affinity, mask);
 	return 0;
 }
-#endif

 static void intel_compose_msi_msg(struct pci_dev *pdev,
 				  unsigned int irq, unsigned int dest,
@@ -1058,9 +1066,7 @@ struct irq_remap_ops intel_irq_remap_ops = {
 	.reenable		= reenable_irq_remapping,
 	.enable_faulting	= enable_drhd_fault_handling,
 	.setup_ioapic_entry	= intel_setup_ioapic_entry,
-#ifdef CONFIG_SMP
 	.set_affinity		= intel_ioapic_set_affinity,
-#endif
 	.free_irq		= free_irte,
 	.compose_msi_msg	= intel_compose_msi_msg,
 	.msi_alloc_irq		= intel_msi_alloc_irq,
...
@@ -111,16 +111,15 @@ int setup_ioapic_remapped_entry(int irq,
 					 vector, attr);
 }

-#ifdef CONFIG_SMP
 int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
 			      bool force)
 {
-	if (!remap_ops || !remap_ops->set_affinity)
+	if (!config_enabled(CONFIG_SMP) || !remap_ops ||
+	    !remap_ops->set_affinity)
 		return 0;

 	return remap_ops->set_affinity(data, mask, force);
 }
-#endif

 void free_remapped_irq(int irq)
 {
...
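The #ifdef CONFIG_SMP blocks around the affinity paths are replaced by config_enabled(CONFIG_SMP) checks. Because the macro expands to a compile-time 0 or 1, the compiler still parses and type-checks the affinity code in UP builds but then discards it as dead code, which avoids bitrot in rarely built configurations. A user-space stand-in for the idea (the constant here is hypothetical):

    #include <stdio.h>

    #define CONFIG_SMP_ENABLED 0    /* stand-in for config_enabled(CONFIG_SMP) */

    static int set_affinity(unsigned long mask)
    {
            if (!CONFIG_SMP_ENABLED)
                    return -1;      /* compiled and checked, then folded away */
            printf("setting affinity to %#lx\n", mask);
            return 0;
    }

    int main(void)
    {
            return set_affinity(0x3) ? 1 : 0;
    }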
@@ -59,11 +59,9 @@ struct irq_remap_ops {
 				  unsigned int, int,
 				  struct io_apic_irq_attr *);

-#ifdef CONFIG_SMP
 	/* Set the CPU affinity of a remapped interrupt */
 	int (*set_affinity)(struct irq_data *data, const struct cpumask *mask,
 			    bool force);
-#endif

 	/* Free an IRQ */
 	int (*free_irq)(int);
...
@@ -150,9 +150,7 @@ struct irq_data {
 	void			*handler_data;
 	void			*chip_data;
 	struct msi_desc		*msi_desc;
-#ifdef CONFIG_SMP
 	cpumask_var_t		affinity;
-#endif
 };

 /*
...