Commit 43f39890 authored by Yinghai Lu, committed by Ingo Molnar

x86: separate default_send_IPI_mask_sequence/allbutself from logical

Impact: 32-bit should use the logical version

There are two versions of default_send_IPI_mask_sequence/allbutself:
one in ipi.h and one in ipi.c for 32-bit.

It seems the .h version overwrote the ipi.c one for a while.

Restore it so 32-bit can use its old logical version again, and remove
the duplicated functions from the .c file.
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 1ff2f20d
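Background note (not part of the original commit message): the _phys and _logical send helpers differ only in how the destination is encoded in the local APIC's Interrupt Command Register. Physical mode puts the target CPU's physical APIC ID in the destination field, while logical mode sets the logical-destination bit (bit 11 of the ICR) and puts the CPU's logical APIC ID there. The following is a minimal, self-contained sketch of that distinction only; write_icr(), cpu_apicid() and cpu_logical_apicid() are hypothetical stand-ins for the kernel's apic_write() calls and apic driver callbacks, and the toy main() merely prints the values it would program.

/* Illustrative sketch only -- not the kernel implementation. */
#include <stdio.h>

#define APIC_DM_FIXED		0x00000u	/* fixed delivery mode        */
#define APIC_DEST_PHYSICAL	0x00000u	/* ICR bit 11 clear: physical */
#define APIC_DEST_LOGICAL	0x00800u	/* ICR bit 11 set: logical    */

/* Stand-in for apic_write(APIC_ICR2, ...) followed by apic_write(APIC_ICR, ...). */
static void write_icr(unsigned int dest, unsigned int icr_low)
{
	printf("ICR2 dest=0x%02x  ICR low=0x%05x\n", dest, icr_low);
}

/* Toy ID mappings; the kernel gets these from the apic driver callbacks. */
static unsigned int cpu_apicid(unsigned int cpu)         { return cpu; }
static unsigned int cpu_logical_apicid(unsigned int cpu) { return 1u << cpu; }

/* Physical mode: destination field holds the target's physical APIC ID. */
static void send_ipi_phys(unsigned int cpu, int vector)
{
	write_icr(cpu_apicid(cpu), APIC_DEST_PHYSICAL | APIC_DM_FIXED | vector);
}

/* Logical (flat) mode: destination field holds the logical APIC ID. */
static void send_ipi_logical(unsigned int cpu, int vector)
{
	write_icr(cpu_logical_apicid(cpu), APIC_DEST_LOGICAL | APIC_DM_FIXED | vector);
}

int main(void)
{
	send_ipi_phys(3, 0x20);		/* unicast to CPU 3 by APIC ID      */
	send_ipi_logical(3, 0x20);	/* unicast to CPU 3 by logical mask */
	return 0;
}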
@@ -120,7 +120,7 @@ static inline void
 }
 
 static inline void
-default_send_IPI_mask_sequence(const struct cpumask *mask, int vector)
+default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
 {
 	unsigned long query_cpu;
 	unsigned long flags;
@@ -139,7 +139,7 @@ default_send_IPI_mask_sequence(const struct cpumask *mask, int vector)
 }
 
 static inline void
-default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
+default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, int vector)
 {
 	unsigned int this_cpu = smp_processor_id();
 	unsigned int query_cpu;
@@ -157,23 +157,72 @@ default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
 	local_irq_restore(flags);
 }
 
+#include <asm/genapic.h>
+
+static inline void
+default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector)
+{
+	unsigned long flags;
+	unsigned int query_cpu;
+
+	/*
+	 * Hack. The clustered APIC addressing mode doesn't allow us to send
+	 * to an arbitrary mask, so I do a unicasts to each CPU instead. This
+	 * should be modified to do 1 message per cluster ID - mbligh
+	 */
+	local_irq_save(flags);
+	for_each_cpu(query_cpu, mask)
+		__default_send_IPI_dest_field(
+			apic->cpu_to_logical_apicid(query_cpu), vector,
+			apic->dest_logical);
+	local_irq_restore(flags);
+}
+
+static inline void
+default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, int vector)
+{
+	unsigned long flags;
+	unsigned int query_cpu;
+	unsigned int this_cpu = smp_processor_id();
+
+	/* See Hack comment above */
+
+	local_irq_save(flags);
+	for_each_cpu(query_cpu, mask) {
+		if (query_cpu == this_cpu)
+			continue;
+		__default_send_IPI_dest_field(
+			apic->cpu_to_logical_apicid(query_cpu), vector,
+			apic->dest_logical);
+	}
+	local_irq_restore(flags);
+}
+
 /* Avoid include hell */
 #define NMI_VECTOR 0x02
 
-void default_send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
-void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
 extern int no_broadcast;
 
-#ifdef CONFIG_X86_64
-#include <asm/genapic.h>
-#else
-static inline void default_send_IPI_mask(const struct cpumask *mask, int vector)
+#ifndef CONFIG_X86_64
+/*
+ * This is only used on smaller machines.
+ */
+static inline void default_send_IPI_mask_bitmask_logical(const struct cpumask *cpumask, int vector)
+{
+	unsigned long mask = cpumask_bits(cpumask)[0];
+	unsigned long flags;
+
+	local_irq_save(flags);
+	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
+	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
+	local_irq_restore(flags);
+}
+
+static inline void default_send_IPI_mask_logical(const struct cpumask *mask, int vector)
 {
-	default_send_IPI_mask_bitmask(mask, vector);
+	default_send_IPI_mask_bitmask_logical(mask, vector);
 }
-
-void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
 #endif
 
 static inline void __default_local_send_IPI_allbutself(int vector)
...
@@ -9,6 +9,7 @@
 #include <asm/genapic.h>
 #include <asm/fixmap.h>
 #include <asm/apicdef.h>
+#include <asm/ipi.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/dmi.h>
@@ -154,17 +155,14 @@ static inline int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
 	return cpuid_apic >> index_msb;
 }
 
-void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
 static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
 {
-	default_send_IPI_mask_sequence(mask, vector);
+	default_send_IPI_mask_sequence_phys(mask, vector);
 }
 
 static inline void bigsmp_send_IPI_allbutself(int vector)
 {
-	default_send_IPI_mask_allbutself(cpu_online_mask, vector);
+	default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
 }
 
 static inline void bigsmp_send_IPI_all(int vector)
...
@@ -451,12 +451,12 @@ static int es7000_check_dsdt(void)
 static void es7000_send_IPI_mask(const struct cpumask *mask, int vector)
 {
-	default_send_IPI_mask_sequence(mask, vector);
+	default_send_IPI_mask_sequence_phys(mask, vector);
 }
 
 static void es7000_send_IPI_allbutself(int vector)
 {
-	default_send_IPI_mask_allbutself(cpu_online_mask, vector);
+	default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
 }
 
 static void es7000_send_IPI_all(int vector)
...
@@ -267,18 +267,18 @@ static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
 {
-	default_send_IPI_mask_sequence(cpumask, vector);
+	default_send_IPI_mask_sequence_phys(cpumask, vector);
 }
 
 static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
					       int vector)
 {
-	default_send_IPI_mask_allbutself(cpumask, vector);
+	default_send_IPI_mask_allbutself_phys(cpumask, vector);
 }
 
 static void physflat_send_IPI_allbutself(int vector)
 {
-	default_send_IPI_mask_allbutself(cpu_online_mask, vector);
+	default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
 }
 
 static void physflat_send_IPI_all(int vector)
...
@@ -17,148 +17,13 @@
 #include <asm/mmu_context.h>
 #include <asm/apic.h>
 #include <asm/proto.h>
+#include <asm/ipi.h>
 
 #ifdef CONFIG_X86_32
-#include <asm/genapic.h>
-
-/*
- * the following functions deal with sending IPIs between CPUs.
- *
- * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
- */
-
-static inline int __prepare_ICR(unsigned int shortcut, int vector)
-{
-	unsigned int icr = shortcut | apic->dest_logical;
-
-	switch (vector) {
-	default:
-		icr |= APIC_DM_FIXED | vector;
-		break;
-	case NMI_VECTOR:
-		icr |= APIC_DM_NMI;
-		break;
-	}
-	return icr;
-}
-
-static inline int __prepare_ICR2(unsigned int mask)
-{
-	return SET_APIC_DEST_FIELD(mask);
-}
-
-void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
-{
-	/*
-	 * Subtle. In the case of the 'never do double writes' workaround
-	 * we have to lock out interrupts to be safe. As we don't care
-	 * of the value read we use an atomic rmw access to avoid costly
-	 * cli/sti. Otherwise we use an even cheaper single atomic write
-	 * to the APIC.
-	 */
-	unsigned int cfg;
-
-	/*
-	 * Wait for idle.
-	 */
-	apic_wait_icr_idle();
-
-	/*
-	 * No need to touch the target chip field
-	 */
-	cfg = __prepare_ICR(shortcut, vector);
-
-	/*
-	 * Send the IPI. The write to APIC_ICR fires this off.
-	 */
-	apic_write(APIC_ICR, cfg);
-}
 
 void default_send_IPI_self(int vector)
 {
-	__default_send_IPI_shortcut(APIC_DEST_SELF, vector);
-}
-
-/*
- * This is used to send an IPI with no shorthand notation (the destination is
- * specified in bits 56 to 63 of the ICR).
- */
-static inline void __default_send_IPI_dest_field(unsigned long mask, int vector)
-{
-	unsigned long cfg;
-
-	/*
-	 * Wait for idle.
-	 */
-	if (unlikely(vector == NMI_VECTOR))
-		safe_apic_wait_icr_idle();
-	else
-		apic_wait_icr_idle();
-
-	/*
-	 * prepare target chip field
-	 */
-	cfg = __prepare_ICR2(mask);
-	apic_write(APIC_ICR2, cfg);
-
-	/*
-	 * program the ICR
-	 */
-	cfg = __prepare_ICR(0, vector);
-
-	/*
-	 * Send the IPI. The write to APIC_ICR fires this off.
-	 */
-	apic_write(APIC_ICR, cfg);
-}
-
-/*
- * This is only used on smaller machines.
- */
-void default_send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector)
-{
-	unsigned long mask = cpumask_bits(cpumask)[0];
-	unsigned long flags;
-
-	local_irq_save(flags);
-	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
-	__default_send_IPI_dest_field(mask, vector);
-	local_irq_restore(flags);
-}
-
-void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector)
-{
-	unsigned long flags;
-	unsigned int query_cpu;
-
-	/*
-	 * Hack. The clustered APIC addressing mode doesn't allow us to send
-	 * to an arbitrary mask, so I do a unicasts to each CPU instead. This
-	 * should be modified to do 1 message per cluster ID - mbligh
-	 */
-	local_irq_save(flags);
-	for_each_cpu(query_cpu, mask)
-		__default_send_IPI_dest_field(apic->cpu_to_logical_apicid(query_cpu), vector);
-	local_irq_restore(flags);
-}
-
-void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
-{
-	unsigned long flags;
-	unsigned int query_cpu;
-	unsigned int this_cpu = smp_processor_id();
-
-	/* See Hack comment above */
-
-	local_irq_save(flags);
-	for_each_cpu(query_cpu, mask) {
-		if (query_cpu == this_cpu)
-			continue;
-		__default_send_IPI_dest_field(
-			apic->cpu_to_logical_apicid(query_cpu), vector);
-	}
-	local_irq_restore(flags);
+	__default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
 }
 
 /* must come after the send_IPI functions above for inlining */
...
@@ -302,6 +302,7 @@ int __init get_memcfg_numaq(void)
 #include <asm/genapic.h>
 #include <asm/fixmap.h>
 #include <asm/apicdef.h>
+#include <asm/ipi.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/init.h>
@@ -319,17 +320,14 @@ static inline unsigned int numaq_get_apic_id(unsigned long x)
 	return (x >> 24) & 0x0F;
 }
 
-void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
 static inline void numaq_send_IPI_mask(const struct cpumask *mask, int vector)
 {
-	default_send_IPI_mask_sequence(mask, vector);
+	default_send_IPI_mask_sequence_logical(mask, vector);
 }
 
 static inline void numaq_send_IPI_allbutself(int vector)
 {
-	default_send_IPI_mask_allbutself(cpu_online_mask, vector);
+	default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector);
 }
 
 static inline void numaq_send_IPI_all(int vector)
...
@@ -112,8 +112,8 @@ struct genapic apic_default = {
 	.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
 	.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
 
-	.send_IPI_mask = default_send_IPI_mask,
-	.send_IPI_mask_allbutself = default_send_IPI_mask_allbutself,
+	.send_IPI_mask = default_send_IPI_mask_logical,
+	.send_IPI_mask_allbutself = default_send_IPI_mask_allbutself_logical,
 	.send_IPI_allbutself = default_send_IPI_allbutself,
 	.send_IPI_all = default_send_IPI_all,
 	.send_IPI_self = NULL,
...
@@ -43,6 +43,7 @@
 #include <asm/genapic.h>
 #include <asm/fixmap.h>
 #include <asm/apicdef.h>
+#include <asm/ipi.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/init.h>
@@ -54,12 +55,9 @@ static inline unsigned summit_get_apic_id(unsigned long x)
 	return (x >> 24) & 0xFF;
 }
 
-void default_send_IPI_mask_sequence(const cpumask_t *mask, int vector);
-void default_send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
-
 static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector)
 {
-	default_send_IPI_mask_sequence(mask, vector);
+	default_send_IPI_mask_sequence_logical(mask, vector);
 }
 
 static inline void summit_send_IPI_allbutself(int vector)
...